diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py
index 3f1ffffb..710c9b49 100644
--- a/src/mistralai/async_client.py
+++ b/src/mistralai/async_client.py
@@ -133,6 +133,7 @@ async def chat(
         top_p: Optional[float] = None,
         random_seed: Optional[int] = None,
         safe_mode: bool = False,
+        safe_prompt: bool = False,
     ) -> ChatCompletionResponse:
         """A asynchronous chat endpoint that returns a single response.
@@ -145,7 +146,8 @@ async def chat(
             top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None.
             random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False.
+            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
+            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.

         Returns:
             ChatCompletionResponse: a response object containing the generated text.
@@ -158,7 +160,7 @@ async def chat(
             top_p=top_p,
             random_seed=random_seed,
             stream=False,
-            safe_mode=safe_mode,
+            safe_prompt=safe_mode or safe_prompt,
         )

         single_response = self._request("post", request, "v1/chat/completions")
@@ -177,6 +179,7 @@ async def chat_stream(
         top_p: Optional[float] = None,
         random_seed: Optional[int] = None,
         safe_mode: bool = False,
+        safe_prompt: bool = False,
     ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
         """An Asynchronous chat endpoint that streams responses.
@@ -189,7 +192,8 @@ async def chat_stream(
             top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None.
             random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False.
+            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
+            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.

         Returns:
             AsyncGenerator[ChatCompletionStreamResponse, None]:
@@ -204,7 +208,7 @@ async def chat_stream(
             top_p=top_p,
             random_seed=random_seed,
             stream=True,
-            safe_mode=safe_mode,
+            safe_prompt=safe_mode or safe_prompt,
         )
         async_response = self._request(
             "post", request, "v1/chat/completions", stream=True
diff --git a/src/mistralai/client.py b/src/mistralai/client.py
index 1f3837fa..365079d9 100644
--- a/src/mistralai/client.py
+++ b/src/mistralai/client.py
@@ -125,6 +125,7 @@ def chat(
         top_p: Optional[float] = None,
         random_seed: Optional[int] = None,
         safe_mode: bool = False,
+        safe_prompt: bool = False,
     ) -> ChatCompletionResponse:
         """A chat endpoint that returns a single response.
@@ -137,7 +138,8 @@ def chat(
             top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None.
             random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False.
+            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
+            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.

         Returns:
             ChatCompletionResponse: a response object containing the generated text.
@@ -150,7 +152,7 @@ def chat(
             top_p=top_p,
             random_seed=random_seed,
             stream=False,
-            safe_mode=safe_mode,
+            safe_prompt=safe_mode or safe_prompt,
         )

         single_response = self._request("post", request, "v1/chat/completions")
@@ -169,6 +171,7 @@ def chat_stream(
         top_p: Optional[float] = None,
         random_seed: Optional[int] = None,
         safe_mode: bool = False,
+        safe_prompt: bool = False,
     ) -> Iterable[ChatCompletionStreamResponse]:
         """A chat endpoint that streams responses.
@@ -181,7 +184,8 @@ def chat_stream(
             top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None.
             random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False.
+            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
+            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.

         Returns:
             Iterable[ChatCompletionStreamResponse]:
@@ -195,7 +199,7 @@ def chat_stream(
             top_p=top_p,
             random_seed=random_seed,
             stream=True,
-            safe_mode=safe_mode,
+            safe_prompt=safe_mode or safe_prompt,
         )

         response = self._request("post", request, "v1/chat/completions", stream=True)
diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py
index b6d1c488..f6ab4354 100644
--- a/src/mistralai/client_base.py
+++ b/src/mistralai/client_base.py
@@ -47,12 +47,12 @@ def _make_chat_request(
         top_p: Optional[float] = None,
         random_seed: Optional[int] = None,
         stream: Optional[bool] = None,
-        safe_mode: Optional[bool] = False,
+        safe_prompt: Optional[bool] = False,
     ) -> Dict[str, Any]:
         request_data: Dict[str, Any] = {
             "model": model,
             "messages": [msg.model_dump() for msg in messages],
-            "safe_prompt": safe_mode,
+            "safe_prompt": safe_prompt,
         }
         if temperature is not None:
             request_data["temperature"] = temperature
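
A minimal usage sketch (not part of the diff) illustrating the change, assuming a caller-provided API key and the model id "mistral-tiny" purely for illustration; callers that still pass the deprecated safe_mode keep working because the clients forward safe_mode or safe_prompt as safe_prompt in the request body:

from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

client = MistralClient(api_key="...")  # assumption: API key supplied by the caller

# New parameter name: enables the safety prompt on the request.
response = client.chat(
    model="mistral-tiny",  # assumption: any chat-capable model id works here
    messages=[ChatMessage(role="user", content="Hello!")],
    safe_prompt=True,
)

# Deprecated spelling still works: safe_mode=True is forwarded as safe_prompt.
legacy_response = client.chat(
    model="mistral-tiny",
    messages=[ChatMessage(role="user", content="Hello!")],
    safe_mode=True,
)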