diff --git a/.genignore b/.genignore new file mode 100644 index 00000000..6bd11b26 --- /dev/null +++ b/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +examples/* +/utils/* +src/mistralai/extra/* +pylintrc +scripts/prepare_readme.py diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..4d75d590 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report_client.yml b/.github/ISSUE_TEMPLATE/bug_report_client.yml new file mode 100644 index 00000000..669503ac --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report_client.yml @@ -0,0 +1,55 @@ +name: Bug report related to the Python client +description: Submit a bug report that's related to the Python client +title: '[BUG CLIENT]: ' +labels: ['bug client', 'triage'] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: textarea + id: python-vv + attributes: + label: Python -VV + description: Run `python -VV` from your virtual environment + placeholder: Copy-paste the output (no need for backticks, will be formatted into code automatically) + render: shell + validations: + required: true + - type: textarea + id: pip-freeze + attributes: + label: Pip Freeze + description: Run `pip freeze` from your virtual environment + placeholder: Copy-paste the output (no need for backticks, will be formatted into code automatically) + render: shell + validations: + required: true + - type: textarea + id: reproduction-steps + attributes: + label: Reproduction Steps + description: Provide a clear and concise description of the steps that lead to your issue. + placeholder: | + 1. First step... + 2. Second step... + ... + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: Expected Behavior + description: Explain briefly what you expected to happen. 
+ validations: + required: true + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any context about your problem that you deem relevant. + - type: textarea + id: suggested-solutions + attributes: + label: Suggested Solutions + description: Please list any solutions you recommend we consider. diff --git a/.github/ISSUE_TEMPLATE/bug_report_model.yml b/.github/ISSUE_TEMPLATE/bug_report_model.yml new file mode 100644 index 00000000..aa4bb622 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report_model.yml @@ -0,0 +1,60 @@ +name: Bug report related to our models +description: Submit a bug report that's related to our models behavior +title: '[BUG MODEL]: ' +labels: ['bug model', 'triage'] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: dropdown + id: model + attributes: + label: Model + description: What's the model? + options: + - open-mistral-7b + - open-mixtral-8x7b + - open-mixtral-8x22b + - mistral-small-latest + - mistral-medium-latest + - mistral-large-latest + - mistral-embed + - codestral-latest + default: 0 + validations: + required: true + - type: textarea + id: model-payload + attributes: + label: Request Payload + description: Please provide the full request payload. + placeholder: | + Please provide a complete example of the request payload, including, if possible, parameters such as 'messages', 'max_tokens', 'temperature', 'prefix', and 'tool_calls'. + If you can, please set the 'temperature' parameter to 0 in your example. This will greatly help us. + validations: + required: true + - type: textarea + id: model-output + attributes: + label: Output + description: Provide the output of the model. + validations: + required: true + - type: textarea + id: model-expected-output + attributes: + label: Expected Behavior + description: Explain briefly what you expected to happen. 
+ validations: + required: true + - type: textarea + id: model-additional-context + attributes: + label: Additional Context + description: Add any context about your problem that you deem relevant. + - type: textarea + id: model-suggested-solutions + attributes: + label: Suggested Solutions + description: Please list any solutions you recommend we consider. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..35251c3e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Documentation + url: https://docs.mistral.ai + about: Developer documentation for the Mistral AI platform + - name: Discord + url: https://discord.com/invite/mistralai + about: Chat with the Mistral community diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml deleted file mode 100644 index d9113945..00000000 --- a/.github/workflows/build_publish.yaml +++ /dev/null @@ -1,81 +0,0 @@ -name: Build and Publish - -on: - push: - branches: ["main"] - - # We only deploy on tags and main branch - tags: - # Only run on tags that match the following regex - # This will match tags like 1.0.0, 1.0.1, etc. - - "[0-9]+.[0-9]+.[0-9]+" - - # Build on pull requests - pull_request: - -jobs: - lint: - runs-on: ubuntu-latest - - steps: - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # Ruff - - name: Ruff check - run: | - poetry run ruff . - - # Mypy - - name: Mypy Check - run: | - poetry run mypy . 
- - publish: - if: startsWith(github.ref, 'refs/tags') - runs-on: ubuntu-latest - needs: lint - steps: - - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # build package using poetry - - name: Build Package - run: | - poetry version ${{ github.ref_name }} - poetry build - - # Publish to PyPi - - name: Pypi publish (prod) - run: | - poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }} - poetry publish \ No newline at end of file diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml new file mode 100644 index 00000000..9dcb04e4 --- /dev/null +++ b/.github/workflows/lint_custom_code.yaml @@ -0,0 +1,33 @@ +name: Linting Python custom files + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 + + - name: Set up Python + uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4 + with: + python-version: '3.12' + + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Install dependencies + run: | + uv sync --all-extras + + # The init, sdkhooks.py and types.py files in the _hooks folders are generated by Speakeasy hence the exclusion + - name: Run all linters + run: scripts/lint_custom_code.sh diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml new file mode 100644 index 00000000..cecefb0e --- /dev/null +++ b/.github/workflows/run_example_scripts.yaml @@ -0,0 +1,51 @@ +name: Running the examples + +on: + push: + branches: + - main + pull_request: + 
branches: + - main + +jobs: + run_examples: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ['3.10', '3.11', '3.12', '3.13'] + + steps: + - name: Checkout code + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + with: + python-version: ${{ matrix.python-version }} + + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Set VERSION + run: | + VERSION=$(echo ${{ matrix.python-version }} | tr -d .) + echo "VERSION=$VERSION" >> $GITHUB_ENV + + - name: Set MISTRAL_API_KEY using VERSION + run: | + echo "MISTRAL_API_KEY=${{ secrets[format('CI_MISTRAL_API_KEY_PYTHON_{0}', env.VERSION)] }}" >> $GITHUB_ENV + + - name: Build the package + run: | + uv build + + - name: Install client with extras and run all examples. + run: | + PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" + uv pip install --system "$PACKAGE" + ./scripts/run_examples.sh + env: + MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} + MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml new file mode 100644 index 00000000..b5d0741b --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -0,0 +1,87 @@ +name: Generate MISTRAL-PYTHON-SDK-AZURE +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 + with: + force: ${{ github.event.inputs.force }} + 
mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + target: mistralai-azure-sdk + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Find PR branch + id: find-pr + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT + + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} + + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + CURRENT=$(grep '^version = ' packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/azure + + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add packages/azure/pyproject.toml packages/azure/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' 
packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align Azure pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml new file mode 100644 index 00000000..05f88e25 --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -0,0 +1,87 @@ +name: Generate MISTRAL-PYTHON-SDK-GOOGLE-CLOUD +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + target: mistralai-gcp-sdk + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Find PR branch + id: find-pr + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT + + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} + + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: 
Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + CURRENT=$(grep '^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/gcp + + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add packages/gcp/pyproject.toml packages/gcp/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align GCP pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml new file mode 100644 index 00000000..59fe1150 --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -0,0 +1,87 @@ +name: Generate MISTRALAI +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + 
target: mistralai-sdk + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Find PR branch + id: find-pr + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT + + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} + + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' .speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + CURRENT=$(grep '^version = ' pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" + + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add pyproject.toml uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git 
a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml new file mode 100644 index 00000000..44635571 --- /dev/null +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -0,0 +1,30 @@ +name: Publish MISTRALAI-SDK +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + confirm_publish: + description: 'WARNING: This will publish v2 SDK (mistralai.client namespace) which is still WIP/alpha. To publish v1 (mistralai namespace), use the v1 branch instead. Type "publish" to confirm.' + required: false + type: string + push: + branches: + - v1 + paths: + - RELEASES.md + - "*/RELEASES.md" +jobs: + publish: + # Auto-publish from v1 branch; require manual confirmation from main + if: | + github.ref == 'refs/heads/v1' || + (github.event_name == 'workflow_dispatch' && github.event.inputs.confirm_publish == 'publish') + uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.github/workflows/test_custom_code.yaml b/.github/workflows/test_custom_code.yaml new file mode 100644 index 00000000..9a53c1e5 --- /dev/null +++ b/.github/workflows/test_custom_code.yaml @@ -0,0 +1,36 @@ +name: Testing Python custom files + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 + + - name: Set up Python + id: setup-python + uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4 + with: + python-version: '3.12' + + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Install dependencies + run: | + uv sync 
--all-extras + + - name: Run the 'src/mistralai/extra' package unit tests + run: uv run python3.12 -m unittest discover -s src/mistralai/extra/tests -t src + + - name: Run pytest for repository tests + run: uv run pytest tests/ diff --git a/.github/workflows/update_speakeasy.yaml b/.github/workflows/update_speakeasy.yaml new file mode 100644 index 00000000..78b5317b --- /dev/null +++ b/.github/workflows/update_speakeasy.yaml @@ -0,0 +1,110 @@ +name: Update Speakeasy SDKs +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +on: + workflow_dispatch: + inputs: + version: + description: 'Speakeasy version to update to (e.g., 1.580.2)' + required: true + type: string + targets: + description: 'Targets to update.' + required: true + type: choice + options: + - mistralai-sdk + - mistralai-azure-sdk + - mistralai-gcp-sdk + - all + +jobs: + update-sdks: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: '3.11' + + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Install dependencies + run: | + uv sync --group dev --group lint --no-default-groups + + - name: Install Speakeasy CLI + run: | + curl -fsSL https://go.speakeasy.com/cli-install.sh | sh + speakeasy --version + + - name: Configure Git + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git config --local --type bool push.autoSetupRemote true + + - name: Create branch + run: | + TIMESTAMP=$(date +%Y%m%d-%H%M%S) + echo "TIMESTAMP=$TIMESTAMP" >> $GITHUB_ENV + git checkout -b update-speakeasy-to-${{ github.event.inputs.version }}-$TIMESTAMP + + - name: Update Speakeasy SDKs + run: | + # Split targets and build 
command with multiple --targets flags + TARGETS_ARGS="" + for target in ${{ github.event.inputs.targets }}; do + TARGETS_ARGS="$TARGETS_ARGS --targets $target" + done + + uv run inv update-speakeasy \ + --version "${{ github.event.inputs.version }}" \ + $TARGETS_ARGS + env: + SPEAKEASY_API_KEY: ${{ secrets.SPEAKEASY_API_KEY }} + + - name: Check for changes + id: check-changes + run: | + if [ -n "$(git status --porcelain)" ]; then + echo "has_changes=true" >> $GITHUB_OUTPUT + echo "Files changed:" + git status --porcelain + else + echo "has_changes=false" >> $GITHUB_OUTPUT + echo "No changes detected" + fi + + - name: Commit and push changes + if: steps.check-changes.outputs.has_changes == 'true' + run: | + git add . + git commit -m "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" + git push origin update-speakeasy-to-${{ github.event.inputs.version }}-${{ env.TIMESTAMP }} + + - name: Create Pull Request + if: steps.check-changes.outputs.has_changes == 'true' + run: | + gh pr create \ + --base main \ + --head update-speakeasy-to-${{ github.event.inputs.version }}-${{ env.TIMESTAMP }} \ + --title "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" \ + --body "This PR updates the Speakeasy SDKs to version ${{ github.event.inputs.version }}. It was automatically generated by the [Update Speakeasy workflow](.github/workflows/update_speakeasy.yaml)." \ + --label automated \ + --label speakeasy-update \ + --assignee ${{ github.actor }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Comment on workflow run + if: steps.check-changes.outputs.has_changes == 'false' + run: | + echo "No changes were detected. The SDKs are already up to date with version ${{ github.event.inputs.version }}." 
diff --git a/.gitignore b/.gitignore index 6769e21d..cf2de5ee 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,22 @@ +.env.local +.idea +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ +.vscode/ +.speakeasy/reports +README-PYPI.md +.venv/ +pyrightconfig.json +src/*.egg-info/ +.python-version +.DS_Store # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class - # C extensions *.so - # Distribution / packaging .Python build/ @@ -25,17 +36,14 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST - # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec - # Installer logs pip-log.txt pip-delete-this-directory.txt - # Unit test / coverage reports htmlcov/ .tox/ @@ -50,75 +58,61 @@ coverage.xml .hypothesis/ .pytest_cache/ cover/ - # Translations *.mo *.pot - # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal - # Flask stuff: instance/ .webassets-cache - # Scrapy stuff: .scrapy - # Sphinx documentation docs/_build/ - # PyBuilder .pybuilder/ target/ - # Jupyter Notebook .ipynb_checkpoints - # IPython profile_default/ ipython_config.py - # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version - # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock - # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. 
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock - # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. -# https://pdm.fming.dev/#use-with-ide +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control .pdm.toml - +.pdm-python +.pdm-build/ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ - # Celery stuff celerybeat-schedule celerybeat.pid - # SageMath parsed files *.sage.py - # Environments .env .venv @@ -127,34 +121,26 @@ venv/ ENV/ env.bak/ venv.bak/ - # Spyder project settings .spyderproject .spyproject - # Rope project settings .ropeproject - # mkdocs documentation /site - # mypy .mypy_cache/ .dmypy.json dmypy.json - # Pyre type checker .pyre/ - # pytype static type analyzer .pytype/ - # Cython debug symbols cython_debug/ - # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
-#.idea/ \ No newline at end of file +#.idea/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..9be71784 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,20 @@ +repos: +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.11.10 + hooks: + - id: ruff + args: [--fix] + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ + exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ +- repo: https://github.com/RobertCraigie/pyright-python + rev: v1.1.401 + hooks: + - id: pyright + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ + exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ +- repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.15.0 + hooks: + - id: mypy + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ + exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock new file mode 100644 index 00000000..8e89c12b --- /dev/null +++ b/.speakeasy/gen.lock @@ -0,0 +1,5054 @@ +lockVersion: 2.0.0 +id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 +management: + docChecksum: 9ea68a20ee2ef4565df16947f204034b + docVersion: 1.0.0 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0rc1 + configChecksum: ba30d47e402a93dc30b5001c33116a3d + repoURL: https://github.com/mistralai/client-python.git + installationURL: https://github.com/mistralai/client-python.git + published: true +persistentEdits: + generation_id: 92ab8a00-49e7-471b-bca6-d18f761863df + pristine_commit_hash: 5eb9662433e80c22603fb3a3bf921f6b285fa2d4 + pristine_tree_hash: 9e781b9b07960a689815c5fa6008765ae4a60716 +features: + python: + additionalDependencies: 1.0.0 + 
additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 + constsAndDefaults: 1.0.7 + core: 6.0.12 + customCodeRegions: 0.1.1 + defaultEnabledRetries: 0.2.0 + downloadStreams: 1.0.1 + enumUnions: 0.1.0 + envVarSecurityUsage: 0.3.2 + examples: 3.0.2 + flatRequests: 1.0.1 + flattening: 3.1.1 + globalSecurity: 3.0.5 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.2.0 + includes: 3.0.0 + methodArguments: 1.0.2 + multipartFileContentType: 1.0.0 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.1.4 + uploadStreams: 1.0.0 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + deleted: true + USAGE.md: + id: 3aed33ce6e6f + last_write_checksum: sha1:50cc0351d6145a805d1d5ae8be4dfce58178e648 + pristine_git_object: f71bbabc223b8cef8d923816fce8d572f3901884 + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/agent.md: + id: ffdbb4c53c87 + last_write_checksum: sha1:c87b05a17785cd83fdfc58cb2d55b6d77d3bc23e + pristine_git_object: 4de5a901d120b85ba5940490a2ec3fd4f1a91136 + docs/models/agentaliasresponse.md: + id: 5ac4721d8947 + last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 + pristine_git_object: aa531ec5d1464f95e3938f148c1e88efc30fa6a6 + docs/models/agentconversation.md: + id: 3590c1a566fa + last_write_checksum: sha1:43e7c1ed2b43aca2794d89f2e6d6aa5f1478cc3e + pristine_git_object: 
451f6fb8f700dddd54c69593c316bf562b5cbc93 + docs/models/agentconversationagentversion.md: + id: 468e0d1614bb + last_write_checksum: sha1:6e60bf4a18d791d694e90c89bdb8cc38e43c324b + pristine_git_object: 668a8dc0f0c51a231a73aed51b2db13de243a038 + docs/models/agenthandoffdoneevent.md: + id: dcf166a3c3b0 + last_write_checksum: sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e + pristine_git_object: 6bfcc3d83457edf05d0f13957d34ead0f260599b + docs/models/agenthandoffentry.md: + id: 39d54f489b84 + last_write_checksum: sha1:a93a604ced2303eb6f93cfe0f1360224d3298b37 + pristine_git_object: 2b689ec720c02b7289ec462d7acca64a82b23570 + docs/models/agenthandoffstartedevent.md: + id: b620102af460 + last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2 + pristine_git_object: 518b5a0c4521ec55a5a28ba3ef0ad1c1fce52792 + docs/models/agentsapiv1agentscreateorupdatealiasrequest.md: + id: c09ec9946094 + last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f + pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb + docs/models/agentsapiv1agentsdeletealiasrequest.md: + id: 429307ab315d + last_write_checksum: sha1:8e0a8388bb51c234aa1eb5566cb68389ebe57574 + pristine_git_object: 8e95c0c31e8ac92b374c153d622d7806b9e59a8d + docs/models/agentsapiv1agentsdeleterequest.md: + id: 0faaaa59add9 + last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 + pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c + docs/models/agentsapiv1agentsgetagentversion.md: + id: 3316961b40c4 + last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59 + pristine_git_object: 7fb9f2d578c4901ca1b41aaada6acc3a5ee94fa1 + docs/models/agentsapiv1agentsgetrequest.md: + id: 01740ae62cff + last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194 + pristine_git_object: ceffe0096ffd6db97a6018d34870c29cec4fb0d3 + docs/models/agentsapiv1agentsgetversionrequest.md: + id: 88ed22b85cde + last_write_checksum: sha1:0ef23807c8efa2662144da66745045abdd2cb60a + 
pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d + docs/models/agentsapiv1agentslistrequest.md: + id: c2720c209527 + last_write_checksum: sha1:99502da34d868f1563ad1e3ea256f3becdbefa11 + pristine_git_object: 4785a54c561f5f9e1eb7ffd3317c5faa9b8b56dd + docs/models/agentsapiv1agentslistversionaliasesrequest.md: + id: 69c8bce2c017 + last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba + pristine_git_object: 3083bf92641404738948cd57306eac978b701551 + docs/models/agentsapiv1agentslistversionsrequest.md: + id: 0bc44ed8d6bb + last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 + pristine_git_object: 91831700bed92cb4f609f8c412dcb0ee98b544ca + docs/models/agentsapiv1agentsupdaterequest.md: + id: 7692812cd677 + last_write_checksum: sha1:aaccaa13eeb0d775b0c6a0b23c328d3f3c2c2dbf + pristine_git_object: 7ef60becfcdde09c8ce0366361306c5661d67e24 + docs/models/agentsapiv1agentsupdateversionrequest.md: + id: a001251b1624 + last_write_checksum: sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f + pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac + docs/models/agentsapiv1conversationsappendrequest.md: + id: 70f76380e810 + last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a + pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3 + docs/models/agentsapiv1conversationsappendstreamrequest.md: + id: f6ada9a592c5 + last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab + pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0 + docs/models/agentsapiv1conversationsdeleterequest.md: + id: c2c9f084ed93 + last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8 + pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a + docs/models/agentsapiv1conversationsgetrequest.md: + id: d6acce23f92c + last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf + pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e + docs/models/agentsapiv1conversationshistoryrequest.md: + id: 
e3efc36ea8b5 + last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be + pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0 + docs/models/agentsapiv1conversationslistrequest.md: + id: 406c3e92777a + last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194 + pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4 + docs/models/agentsapiv1conversationslistresponse.md: + id: 394c37d2203f + last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859 + pristine_git_object: b233ee203ff5da0c65d6e9f87b2925d6802d2c0a + docs/models/agentsapiv1conversationsmessagesrequest.md: + id: 2c749c6620d4 + last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2 + pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649 + docs/models/agentsapiv1conversationsrestartrequest.md: + id: 6955883f9a44 + last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9 + pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4 + docs/models/agentsapiv1conversationsrestartstreamrequest.md: + id: 0c39856fd70e + last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178 + pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d + docs/models/agentscompletionrequest.md: + id: 906b82c214dc + last_write_checksum: sha1:b5685a779b633823ccfe99d9740078e0aab50bde + pristine_git_object: 33435732b94c81c7bccff5cf1868b2f382223200 + docs/models/agentscompletionrequestmessage.md: + id: 5337f0644b40 + last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb + pristine_git_object: 957703b528d3da6f57576064d7cb9b2af63c362a + docs/models/agentscompletionrequeststop.md: + id: ad1e0e74b6b8 + last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9 + pristine_git_object: 21ce6fb539238168e6d1dfc5a8206d55d33018d3 + docs/models/agentscompletionrequesttoolchoice.md: + id: bd8a6f9fbb47 + last_write_checksum: sha1:f3d9ec3c82b6bbd2c3cbc320a71b927edcc292b1 + pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76 + 
docs/models/agentscompletionstreamrequest.md: + id: 21d09756447b + last_write_checksum: sha1:9d506ac8f620f4cef54b4b7a1891fb17b8eaefa5 + pristine_git_object: 407be8e0c1264a31cc0d80c1059f3bd62c2eaceb + docs/models/agentscompletionstreamrequestmessage.md: + id: b309ade92081 + last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb + pristine_git_object: 6ccf4244a709de7bedbf75042efb935129a6ca01 + docs/models/agentscompletionstreamrequeststop.md: + id: 4925b6b8fbca + last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20 + pristine_git_object: 981005f3ff2277eae57c56787edb5f1f62d1fe46 + docs/models/agentscompletionstreamrequesttoolchoice.md: + id: b1f76f7a4e1c + last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe + pristine_git_object: 4354523a7d0d21721a96e91938b89236169ccced + docs/models/agenttool.md: + id: 513b8b7bc0b7 + last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7 + pristine_git_object: 022f7e10edb22cb1b1d741c13ac586bd136d03b5 + docs/models/apiendpoint.md: + id: be613fd9b947 + last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 + pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be + docs/models/archivemodelresponse.md: + id: 133f4af8058f + last_write_checksum: sha1:95fa73ebd765cbd244c847218df6d31e18dc5e85 + pristine_git_object: 276656d1d00ca174e78aa9102f7f576575daa818 + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/attributes.md: + id: ececf40457de + 
last_write_checksum: sha1:9f23adf16a682cc43346d157f7e971c596b416ef + pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050 + docs/models/audiochunk.md: + id: 88315a758fd4 + last_write_checksum: sha1:b47b295122cea28d66212d75a1f0eccd70a248cc + pristine_git_object: 1ba8b0f578fa94b4f8dddf559798e033a1704e7b + docs/models/audioencoding.md: + id: 1e0dfee9c2a0 + last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 + pristine_git_object: feec8c71bf5a89a5c0099a9d075bc2bd36dd5f73 + docs/models/audioformat.md: + id: 41973dd397de + last_write_checksum: sha1:b81fbaf4f8aa03937c91f76d371ad5860836a772 + pristine_git_object: d174ab9959cadde659f76db94ed87c743e0f6783 + docs/models/audiotranscriptionrequest.md: + id: ebf59641bc84 + last_write_checksum: sha1:a478d0656a0f69d4c426e548e2236b99730e2084 + pristine_git_object: 80bd53015ddee1bcecc7aeecc75152a19afc22c1 + docs/models/audiotranscriptionrequeststream.md: + id: 79b5f721b753 + last_write_checksum: sha1:df6825c05b5a02dcf904ebaa40fb97e9186248cc + pristine_git_object: 5d64964d1a635da912f2553c306fb8654ebfca2e + docs/models/basemodelcard.md: + id: 2f62bfbd650e + last_write_checksum: sha1:4b29e0d24060b6724e82aeee05befe1cddb316f4 + pristine_git_object: 0f42504fd6446c0baf4686bfbb8481658b6789cd + docs/models/batcherror.md: + id: 8053e29a3f26 + last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f + pristine_git_object: 95016cdc4c6225d23edc4436e11e4a7feacf1fe6 + docs/models/batchjob.md: + id: de2a00d0f739 + last_write_checksum: sha1:1160822c4032e1745dfaf37abcac02e78cbc4fb4 + pristine_git_object: 162e2cff3a1132f2b89e57dcf1bf8b4c403b6453 + docs/models/batchjobstatus.md: + id: 7e6f034d3c91 + last_write_checksum: sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc + pristine_git_object: 64617b31488130f94bf47952ccaa4958670473c8 + docs/models/batchrequest.md: + id: b113ca846594 + last_write_checksum: sha1:f9dc702c27b8257e008390519df744290e09c4b4 + pristine_git_object: 6ee3b394a8b1125769a355359b5a44bc7c3224ea + 
docs/models/builtinconnectors.md: + id: 9d14e972f08a + last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c + pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9 + docs/models/chatclassificationrequest.md: + id: 57b86771c870 + last_write_checksum: sha1:bfd2fb8e2c83578ca0cea5209ea3f18c3bcd2ae5 + pristine_git_object: ba9c95eab2c1e4f080e39e8804a5de222e052ee6 + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:4980b698006c641b1c84495c5b601cc8662b05f6 + pristine_git_object: 921161faf38b2f4d4648d6d744c08a96ed38f0a6 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:c54d4a32d0d65533b79c381174690e9b735b2800 + pristine_git_object: 8761f000d4249de86265bc63da785cd807c2e7a5 + 
docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/chatmoderationrequest.md: + id: 22862d4d20ec + last_write_checksum: sha1:9bbe510ee67515092bd953ad7f84ae118398af54 + pristine_git_object: f252482db0e404e21a61aafba0d09d9561610c11 + docs/models/chatmoderationrequestinputs1.md: + id: 89311e3e440d + last_write_checksum: sha1:8d4c2dbd9207589aabf9c00cf60c61d2d3eef452 + pristine_git_object: e15b8a844110fae68c02da040cd0122be5afc09a + docs/models/chatmoderationrequestinputs2.md: + id: 4daa876da841 + last_write_checksum: sha1:e34eb6557e06e7783ed14d959c2a29959c26fd4c + pristine_git_object: f40a4ebe0780c493e8bd7a322aec31893669a181 + docs/models/chatmoderationrequestinputs3.md: + id: aec173bca43b + last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7 + pristine_git_object: ff1c6ea32233d5c5e8d6292c62f9e8eacd3340c3 + docs/models/checkpoint.md: + id: 9c97119961cf + last_write_checksum: sha1:0e7732d9c30f67d59fe4d9ad1d165ad0cd80c790 + pristine_git_object: f7f35530c0d57aca02b2503e968a9a262bb1a10d + docs/models/classificationrequest.md: + id: 6f79e905a3fa + last_write_checksum: sha1:3e083210e1cfdd3539e714928688648673767ae8 + pristine_git_object: 99cdc4a0863577d523e8921af31a179f109bc9fb + docs/models/classificationrequestinputs.md: + id: aff99510c85a + last_write_checksum: sha1:c4b52dd83924f56bef1f54c4fbbdf3cd62e96dbe + pristine_git_object: 69d75d11276f6101452a9debfa2cbcdd39333849 + 
docs/models/classificationresponse.md: + id: 21227dec49f2 + last_write_checksum: sha1:56756a6c0c36ce94653b676eba1f648907a87a79 + pristine_git_object: d1633ae779850cba0eac4a9c26b5b776a7b789e0 + docs/models/classificationtargetresult.md: + id: 97a5eab5eb54 + last_write_checksum: sha1:41269d1372be3523f46cb57bd19292af4971f7c0 + pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1 + docs/models/classifierfinetunedmodel.md: + id: b67a370e0ef1 + last_write_checksum: sha1:5fe3c26e337083716dd823e861924a03c55ce293 + pristine_git_object: ad05f93147d6904ee62602480c24644ec5e4cf63 + docs/models/classifierfinetuningjob.md: + id: 5bf35c25183f + last_write_checksum: sha1:afedddfe38e217189b5ec12ded74606c3b1e4c59 + pristine_git_object: 369756ba16a4c64f03cb6bb5da9bc0abd2a8eac6 + docs/models/classifierfinetuningjobdetails.md: + id: c91d53e010d5 + last_write_checksum: sha1:59a4c11a0d52b02ffc48e011a40fb4ebb1604825 + pristine_git_object: c5efdf1c817b978506a4862991a0f8eab8b219fb + docs/models/classifierfinetuningjobdetailsintegration.md: + id: e6c161ac2a44 + last_write_checksum: sha1:6450686e7f92ac8c1c02fcea82d5855ca6738b46 + pristine_git_object: 438a35d9eb0e4250a9e6bcbb7dafeb26d74e018a + docs/models/classifierfinetuningjobdetailsstatus.md: + id: 87737e85b845 + last_write_checksum: sha1:2ff02df3efee0f9b5867045d43fc71025fb37129 + pristine_git_object: 058c65832188f7148d96ab320114d984d618efa1 + docs/models/classifierfinetuningjobintegration.md: + id: 91de20176a8c + last_write_checksum: sha1:e49a7c082300eb4d3106e96b21ebc6860060b8c3 + pristine_git_object: 820aee4c6fcf899341d869d796b1a61d4d4eab42 + docs/models/classifierfinetuningjobstatus.md: + id: e3c4e672dc88 + last_write_checksum: sha1:1bfd306ab633d3ea73272e56796c1f63843fce22 + pristine_git_object: ca829885de056c5ccafec0fe3a901743e56deb0c + docs/models/classifiertarget.md: + id: 4c5c0b3e0bc7 + last_write_checksum: sha1:ad16823def0acb267543c4189df32406a27685aa + pristine_git_object: f8c99e2e7e6653d0e809506861ec4c25571cb5c9 + 
docs/models/classifiertargetresult.md: + id: c78d27aec276 + last_write_checksum: sha1:17c37c10385019953d6085fff6681808f950693f + pristine_git_object: ccadc623493bfa946dc2cccf894364b1e6b8b452 + docs/models/classifiertrainingparameters.md: + id: 9370e1ccd3d5 + last_write_checksum: sha1:03f7c32717792966afdec50cb9dc1c85bb99dd84 + pristine_git_object: 3b6f3be6942bbcf56261f773864a518d16923880 + docs/models/codeinterpretertool.md: + id: f009740c6e54 + last_write_checksum: sha1:a2114d61a98a48b4365a77c0c75c06ca834367ad + pristine_git_object: 6302fc627d7c49442b6c9aec19c70fdceaf7c519 + docs/models/completionargs.md: + id: 3b54534f9830 + last_write_checksum: sha1:7432daccf23d8963a65fa4f2b103ea0396fbfbeb + pristine_git_object: 148f760859636e8c32259604698785663491a445 + docs/models/completionargsstop.md: + id: 40b0f0c81dc8 + last_write_checksum: sha1:2a576618c62d4818af0048ed3a79080149a88642 + pristine_git_object: b93f993e44a18fb0f3711163277f538cfedbf828 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionfinetunedmodel.md: + id: 23a7705a9c89 + last_write_checksum: sha1:50d173b7505a97435c9d7ccb4fa99af04a51c6a2 + pristine_git_object: 0055db021f1c039c84cf7cfecd654683d2f9996f + docs/models/completionfinetuningjob.md: + id: 13c69dd18690 + last_write_checksum: sha1:b77e82f00f851034999986ff67aea5b0b558fbd2 + pristine_git_object: 83c0ae7e551e1f70df8dad4dce75ad20fe2b7ae7 + docs/models/completionfinetuningjobdetails.md: + id: b285f80afd59 + last_write_checksum: sha1:6ced5483d8249d7e8f502ec3f53f45d76e348003 + pristine_git_object: 3c54e874bcd968a9d5d9c8b3285632ba71364763 + docs/models/completionfinetuningjobdetailsintegration.md: + id: 27662795c95f + 
last_write_checksum: sha1:655f03341ad1b590ec451288607cec61024bfefc + pristine_git_object: 38f6a34963db4a653ec7aa7f0c85b68e837ebafc + docs/models/completionfinetuningjobdetailsrepository.md: + id: 023920eecc9e + last_write_checksum: sha1:2b8ba6ff115fda4cc6ed74825fb09b9500d915f6 + pristine_git_object: c6bd67cde1d1628aa3efc4a53fa8487a009aa129 + docs/models/completionfinetuningjobdetailsstatus.md: + id: b1b717a4e256 + last_write_checksum: sha1:97c8699f0979978ea4320da3388e18da6219cb87 + pristine_git_object: 94d795a9ba4ec743f274d4ab5666e8897d174c61 + docs/models/completionfinetuningjobintegration.md: + id: 392ffc2cdef2 + last_write_checksum: sha1:53540da44e0edbad5d4085f81ded159dbc582a6c + pristine_git_object: dbe57417d78f1de798c6eaea7e56984e3b002cb9 + docs/models/completionfinetuningjobrepository.md: + id: deb47b72e8e4 + last_write_checksum: sha1:c0fd43a01c2f763c7945311741ee3c2b9c7520f6 + pristine_git_object: 54225e27204b703a6b33d2d66492e272559c3b3c + docs/models/completionfinetuningjobstatus.md: + id: 2ac420312815 + last_write_checksum: sha1:90f498cb04e89e8f4a424762c07231fd9030b326 + pristine_git_object: db151a1bd871a2bf231424a78c8c450b2a488099 + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 + docs/models/completiontrainingparameters.md: + id: b716b0195d39 + last_write_checksum: sha1:1d8d7c469f933ea741ec15c8b9ef8b986e0ca95e + pristine_git_object: 4746a95df18c78331f572425a16b2b3dcbc2df4c + docs/models/confirmation.md: + id: 19b9e48a3c2e + last_write_checksum: sha1:eb6494cb19f23c6df62afb009cc88ce38d24af86 + pristine_git_object: fd6e6aaa58cabba0cdec1b76ac50fb6e46f91b07 + docs/models/contentchunk.md: + id: 
d2d3a32080cd + last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c + pristine_git_object: cb7e51d3a6e05f197fceff4a4999594f3e340dac + docs/models/conversationappendrequest.md: + id: 722746e5065c + last_write_checksum: sha1:c8a4a49f0a1fe5cdd2ef6264ef9c600cfc8f7beb + pristine_git_object: 78a96508e4e1c6f83de4556d0bfa3b10c875da37 + docs/models/conversationappendrequesthandoffexecution.md: + id: e3f56d558037 + last_write_checksum: sha1:dc71c8db746bb08f6630e995cf6af9fda747e954 + pristine_git_object: 7418b36a55fab959639aec456a946600eb908efb + docs/models/conversationappendstreamrequest.md: + id: e9f8131435e8 + last_write_checksum: sha1:3afe7eaafbf61abcd9341ee8fbca5c6d0c2db0ab + pristine_git_object: daea9c522a8a0693edce11b1bbeca1f2cba0781e + docs/models/conversationappendstreamrequesthandoffexecution.md: + id: 5739ea777905 + last_write_checksum: sha1:c85584b63c0c5d859ee5d46d6ae167a8ee44e279 + pristine_git_object: 1bbced3e61a521401ae93a7b1f73d0e9c061e5fd + docs/models/conversationevents.md: + id: be63cc7c526e + last_write_checksum: sha1:1667c767ef53fd7aef90452fde2a8245ed2b2ae6 + pristine_git_object: f1e2c4e90181ff729d3fdb37b0135e9bbd095c04 + docs/models/conversationeventsdata.md: + id: d4907b066f4b + last_write_checksum: sha1:f58b7f3e738c2d0146b228076a5dc0c6cf84ffb1 + pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3 + docs/models/conversationhistory.md: + id: 7e97e8e6d6e9 + last_write_checksum: sha1:719a7c0722f3ad2e9f428dd31abf7bd0bad197d2 + pristine_git_object: daefe3363fb57d9a7d2737d3ea3d6e6f61021d49 + docs/models/conversationinputs.md: + id: 23e3160b457d + last_write_checksum: sha1:0c6abaa34575ee0eb22f12606de3eab7f4b7fbaf + pristine_git_object: 86db40ea1390e84c10a31155b3cde9066eac23b0 + docs/models/conversationmessages.md: + id: 46684ffdf874 + last_write_checksum: sha1:5b10a9f3f19591a2675979c21dd8383d5249d728 + pristine_git_object: 8fa51571697ee375bfbc708de854bc0b1129eec7 + docs/models/conversationrequest.md: + id: dd7f4d6807f2 + 
last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c + pristine_git_object: bd7823a88a07d4bc8fe1da82e51f843e70480ee1 + docs/models/conversationrequestagentversion.md: + id: 68aad87b1459 + last_write_checksum: sha1:fd2e9cd7ed2499b5843c592505ec5e0596a50b33 + pristine_git_object: 9f2518211256762d03dec12c4c4464d48f7ed52c + docs/models/conversationrequesthandoffexecution.md: + id: 9733b1e121d1 + last_write_checksum: sha1:f7df210a46acf24abb1312123aebe9e595a190e8 + pristine_git_object: e7314f7e0080ff3f1a80afdbb229c78df5b008bb + docs/models/conversationrequesttool.md: + id: bd1bb6fcea8b + last_write_checksum: sha1:69d503d73f5bd044882d13cd0c7de188dd5f4831 + pristine_git_object: 2e4e8d01b5482c4e0644be52e55bf6912aeff69e + docs/models/conversationresponse.md: + id: 2eccf42d48af + last_write_checksum: sha1:8a86a4d0df6d13b121d5e41a8ee45555b69bf927 + pristine_git_object: 2732f785cdd706274ec5ff383f25fc201e6d0f78 + docs/models/conversationrestartrequest.md: + id: 558e9daa00bd + last_write_checksum: sha1:434e6c94b5d6c37b9026d536308cd1d3ff56e8d6 + pristine_git_object: ad3ff3624f533e4d4f751264d9bc6dd1849b3b69 + docs/models/conversationrestartrequestagentversion.md: + id: e6ea289c6b23 + last_write_checksum: sha1:a5abf95a81b7e080bd3cadf65c2db38ca458573f + pristine_git_object: 019ba301411729ec2c8078404adae998b3b9dacd + docs/models/conversationrestartrequesthandoffexecution.md: + id: faee86c7832c + last_write_checksum: sha1:44728be55e96193e6f433e2f46f8f749f1671097 + pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e + docs/models/conversationrestartstreamrequest.md: + id: 01b92ab1b56d + last_write_checksum: sha1:e9755598b5be197a938f1f74aa77ac24ccac8457 + pristine_git_object: 865a1e8f666d7f6878c40eb70fe5ab1c63da3066 + docs/models/conversationrestartstreamrequestagentversion.md: + id: 395265f34ff6 + last_write_checksum: sha1:ebf4e89a478ab40e1f8cd3f9a000e179426bda47 + pristine_git_object: 9e0063003f1d8acce61cf4edda91ddbc23a3c69d + 
docs/models/conversationrestartstreamrequesthandoffexecution.md: + id: 3e9c4a9ab94d + last_write_checksum: sha1:300e197f11ad5efc654b51198b75049890258eef + pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd + docs/models/conversationstreamrequest.md: + id: 833f266c4f96 + last_write_checksum: sha1:5cb58852d393eb6cc504b45d8b238fc2f3eecd2a + pristine_git_object: 8b74f9e7cdea83a5622df2c3b79debe3c4427288 + docs/models/conversationstreamrequestagentversion.md: + id: e99ccc842929 + last_write_checksum: sha1:0ba5fca217681cdc5e08e0d82db67884bed076a6 + pristine_git_object: 52ee96720abbb3fec822d0792dbde7020f9fb189 + docs/models/conversationstreamrequesthandoffexecution.md: + id: e6701e5f9f0c + last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50 + pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85 + docs/models/conversationstreamrequesttool.md: + id: 71df6212ff44 + last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc + pristine_git_object: 0f75f82b38f224340bed468ceecfe622066740ba + docs/models/conversationthinkchunk.md: + id: b9a8324da8f1 + last_write_checksum: sha1:80aed188198434ceca134e7aa7351ddba82c92c9 + pristine_git_object: 1fb16bd99f2b6277f87cd40d5c1eca389819d725 + docs/models/conversationthinkchunkthinking.md: + id: 477db2d543bd + last_write_checksum: sha1:d9f8c37fe933a3e52e2adb3ffe283d79c187cd36 + pristine_git_object: 84b800188b248166aac0043994fa27d4d79aad9d + docs/models/conversationusageinfo.md: + id: 57ef89d3ab83 + last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 + pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 + docs/models/createagentrequest.md: + id: 9484bab389c1 + last_write_checksum: sha1:b3228a622081b6f4b2a8bdaa60ca16049517d819 + pristine_git_object: cca3a079c532d3426f65a15bb0affdd34fd1d3ac + docs/models/createagentrequesttool.md: + id: 72e5f99878c5 + last_write_checksum: sha1:a90ad01c15da321f0c8ec700ba359a5371c5dcbb + pristine_git_object: 
c6ed3e98566eb684932fae9d2648a85c84443493 + docs/models/createbatchjobrequest.md: + id: e79afe8f495c + last_write_checksum: sha1:6cedce49f3108b9d5bc80e6d11712c594f2d9e50 + pristine_git_object: d094e2d518b31ada68c282241af3aa1483e98ff6 + docs/models/createfileresponse.md: + id: ea1396cebae8 + last_write_checksum: sha1:7b26d0a466004aca5cefaeb29f84dafc405c51ff + pristine_git_object: 8152922b0d4ce199e269df955e5a25d4acf71e28 + docs/models/createfinetuningjobrequest.md: + id: 36824ba035ff + last_write_checksum: sha1:78f019530e9f5deace91c454c91ec6c4d0d23a20 + pristine_git_object: a93e323d5dd474c6d287e1203e85b79d11d762f0 + docs/models/createfinetuningjobrequestintegration.md: + id: e41b5575b494 + last_write_checksum: sha1:06dab95269f4a571a4c62a7f956fbf0250a0e8b3 + pristine_git_object: 0054a4a683a88fe67f92c1659bcb8c792ca8d286 + docs/models/createfinetuningjobrequestrepository.md: + id: e113eb1929b5 + last_write_checksum: sha1:6bd504d3ecb219f3245a83d306c1792133b96769 + pristine_git_object: 32be1b6dc3fcf7f6ee1a1d71abee4c81493655c2 + docs/models/createlibraryrequest.md: + id: 8935b2ed9d13 + last_write_checksum: sha1:c00abfe1abb0f0323e434b084dafa0d451eb3e51 + pristine_git_object: 71562806dbec6444dcdd0a19852a31ca00b6229a + docs/models/deletefileresponse.md: + id: ab3aa44589a0 + last_write_checksum: sha1:47ebc2474e4725e9ecb0f0d5940c604d9a82a4df + pristine_git_object: 188e2504606b051674352339c6aa999116a43b61 + docs/models/deletemodelout.md: + id: 5643e76768d5 + last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269 + pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd + docs/models/deletemodelv1modelsmodeliddeleterequest.md: + id: c838cee0f093 + last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99 + pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4 + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + 
docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 + docs/models/document.md: + id: cd1d2a444370 + last_write_checksum: sha1:c10641b02547bedcc982b8997097083dfc562598 + pristine_git_object: 42c639a6a9b235ab4c754286486fa7e8872a0e7e + docs/models/documentlibrarytool.md: + id: 68083b0ef8f3 + last_write_checksum: sha1:76b9f47c399915a338abe929cb10c1b37282eadf + pristine_git_object: 95c3fa52ee3ff29e72bc0240a98c0afaa0cd5f62 + docs/models/documenttextcontent.md: + id: 29587399f346 + last_write_checksum: sha1:93382da0228027a02501abbcf681f247814d3d68 + pristine_git_object: 989f49e9bcb29f4127cb11df683c76993f14eba8 + docs/models/documentunion.md: + id: c65f9e42375c + last_write_checksum: sha1:249043e03067f79b27dc6eac410fb937920e8cdb + pristine_git_object: e573bd4632493ca648ad61307c70148366625d4b + docs/models/documentupload.md: + id: 7ff809a25eb0 + last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9 + pristine_git_object: 4e58a475f1776431c9c27a0fcdd00dd96257801f + docs/models/documenturlchunk.md: + id: 48437d297408 + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 + docs/models/embeddingdtype.md: + id: 22786e732e28 + last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71 + pristine_git_object: 01656b0a85aa87f19909b18100bb6981f89683fc + docs/models/embeddingrequest.md: + id: bebee24421b4 + last_write_checksum: sha1:087230e81cfbbc539edc7cc1c0a490728276d217 + pristine_git_object: 71d139cdf5c556a1224d707be70f3fabe032fc27 + docs/models/embeddingrequestinputs.md: + id: 6a35f3b1910a + last_write_checksum: sha1:f3bf6b89f279f59010124aa402e282c7c691eb03 + pristine_git_object: a3f82c1c67c726d3ef8e5e5ea5513386acc7c2f4 + docs/models/embeddingresponse.md: + id: 31cd0f6b7bb5 + last_write_checksum: sha1:1d7351c68b075aba8e91e53d29bdab3c6dd5c3a2 + 
pristine_git_object: 2bd85b4d245978ec396da067060cfe892f19c64f + docs/models/embeddingresponsedata.md: + id: 89b078acdc42 + last_write_checksum: sha1:e3e9200948f864382e0ecd3e04240b13d013141a + pristine_git_object: 20b50618ac99c63f7cf57fe4377840bfc1f85823 + docs/models/encodingformat.md: + id: 066e154e4d43 + last_write_checksum: sha1:8d6c4b29dea5ff7b0ae2b586951308fad99c60eb + pristine_git_object: 7d5941cfe6cea2e85b20d6fb0031e9b807bac471 + docs/models/entitytype.md: + id: 130a2f7038b0 + last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983 + pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954 + docs/models/entry.md: + id: da9a99ab48ab + last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933 + pristine_git_object: d934b6774b25713afe923154d7709755426ec2cf + docs/models/event.md: + id: 311c22a8574a + last_write_checksum: sha1:627793d6aed5e378e3f2eeb4087808eb50e948d5 + pristine_git_object: 3eebffca874b8614a5be3d75be3cb7b0e52c2339 + docs/models/file.md: + id: 4ad31355bd1c + last_write_checksum: sha1:ade4d3c908c664a07a3c333cc24bc1bfb43ab88b + pristine_git_object: 37cc418f9e5189c18f312c42060fd702e2963765 + docs/models/filechunk.md: + id: edc076728e9d + last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 + pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c + docs/models/filepurpose.md: + id: ed6216584490 + last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a + pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b + docs/models/filesapiroutesdeletefilerequest.md: + id: 7fdf9a97320b + last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107 + pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c + docs/models/filesapiroutesdownloadfilerequest.md: + id: b9c13bb26345 + last_write_checksum: sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421 + pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88 + docs/models/filesapiroutesgetsignedurlrequest.md: + id: 08f3772db370 + 
last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f + pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 + docs/models/filesapirouteslistfilesrequest.md: + id: 04bdf7c654bd + last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980 + pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608 + docs/models/filesapiroutesretrievefilerequest.md: + id: 2783bfd9c4b9 + last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab + pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b + docs/models/fileschema.md: + id: 9a05a660399d + last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 + pristine_git_object: 4f3e72dba17a964155007755ad9d69f0304b2adb + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 + pristine_git_object: fde0b625c29340e8dce1eb3026ce644b1885e53a + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: sha1:f8755bc554dd44568c42eb5b6dde04db464647ab + pristine_git_object: ba62d854f030390418597cbd8febae0e1ce27ea8 + docs/models/fimcompletionstreamrequeststop.md: + id: c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/finetuneablemodeltype.md: + id: e16926b57814 + last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52 + pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd + docs/models/finetunedmodelcapabilities.md: + id: 3a6a0498ccf7 + last_write_checksum: 
sha1:82fc7d3f4e0b591b757f202699bb645bc61c69ff + pristine_git_object: d3203a2adccb7eb89c58395952c3e5a123a5b31b + docs/models/format_.md: + id: a17c22228eda + last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 + pristine_git_object: 97d286a4ed7cff0a4058bbfa06c4573428182876 + docs/models/ftclassifierlossfunction.md: + id: b546cfde5aa6 + last_write_checksum: sha1:752d9d238a90a3ef55205576fa38cee56ea1539e + pristine_git_object: 919cdd384315c99d4b590bc562298403733344ce + docs/models/ftmodelcard.md: + id: 15ed6f94deea + last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941 + pristine_git_object: 409f0526316a621b30dfbe45126c6b232e01fad4 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functioncallentry.md: + id: 016986b7d6b0 + last_write_checksum: sha1:373eb3a2d72596fcbb8933b28426896d5ac6b6f4 + pristine_git_object: 2843db9d36d8b82a15ebfce0833c8b0832609b4a + docs/models/functioncallentryarguments.md: + id: c4c609e52680 + last_write_checksum: sha1:ae88aa697e33d60f351a30052aa3d6e2a8a3e188 + pristine_git_object: f1f6e39e724673556a57059a4dbda24f31a4d4b9 + docs/models/functioncallentryconfirmationstatus.md: + id: 18f36160d744 + last_write_checksum: sha1:cc3ea4e03d26a1b22f94d42a87bd5ae63535d266 + pristine_git_object: 8948beb6d9ac647ada655960284dfc7f6d1f5ca1 + docs/models/functioncallevent.md: + id: cc9f2e603464 + last_write_checksum: sha1:58c6ee00af0c63614fd7506345977f9f2d8838ec + pristine_git_object: 0e3a36d6045a69e96c40836cdb586424225775af + docs/models/functioncalleventconfirmationstatus.md: + id: a33cc7957407 + last_write_checksum: sha1:36ac2d3442d83cbb1256e86f413134296bf8e90f + pristine_git_object: 
4a3c8774d4eec4e1f5fea23a1827082e09f91669 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/functionresultentry.md: + id: 24d4cb18998c + last_write_checksum: sha1:1758992e30517b505b8d0622a54545dc9ae19163 + pristine_git_object: 6a77abfd27e3e46de950646d7f89777dca11300e + docs/models/functiontool.md: + id: 5fb499088cdf + last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d + pristine_git_object: 0226b7045c9d82186e1111bb2025e96a4de90bd6 + docs/models/getfileresponse.md: + id: a983b3c8acd6 + last_write_checksum: sha1:5ca732ae5b384937473c04de6736fbab34deca24 + pristine_git_object: 0edd13e0818fc70c9c4db1e08b1490c1e146ea63 + docs/models/getsignedurlresponse.md: + id: 5539e5d7c3d4 + last_write_checksum: sha1:7198474f48bfba6d47326cd436e4a00a8ba70ce3 + pristine_git_object: bde693236406fe092f48c315e3b68a2fbbe6f9a4 + docs/models/githubrepository.md: + id: 66c120df624b + last_write_checksum: sha1:045e538dd7faffc1c6c6e6816563c5c8e776a276 + pristine_git_object: 827b6f34ae68ace7b8b4811764f59de2e0fcdd22 + docs/models/githubrepositoryin.md: + id: b42209ef8423 + last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23 + pristine_git_object: 241cf584d5e2425e46e065f47a18bea50fa624db + docs/models/hyperparameters.md: + id: c167bad5b302 + last_write_checksum: sha1:e391cf72690e6cd01a2878081b8d87938e1c6639 + pristine_git_object: b6c00c3647d21789c92ad7d32dd29c3089ca134f + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 + docs/models/imagegenerationtool.md: + id: d5deb6b06d28 + last_write_checksum: sha1:a1813ef99e4a0990fd073bb2311c475e88072029 + pristine_git_object: b476b6f2733a49767d7f7a4ad091fc321ab514f4 + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: 
sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 + docs/models/inputentries.md: + id: a5c647d5ad90 + last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53 + pristine_git_object: b44a467d258cfa8cc3d2a3236330471dbc3af109 + docs/models/inputs.md: + id: 4b0a7fb87af8 + last_write_checksum: sha1:c5f0c21c25fd5a698398a9e4ddf6261add60740c + pristine_git_object: d5771207d9725f04ca2ab1be692fc089360a58f4 + docs/models/instructrequest.md: + id: a0034d7349a2 + last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3 + pristine_git_object: 5f0cdfff135fb72d3b1a81999a30b720c044e3d4 + docs/models/instructrequestmessage.md: + id: 380503708a09 + last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023 + pristine_git_object: 57ed27ab3b1430514797dd0073bc87b31e5e3815 + docs/models/jobmetadata.md: + id: 1f8e4c2f49e5 + last_write_checksum: sha1:a29ec10cd129b955672f60aaf526905780afe6b6 + pristine_git_object: 5d8a89ddc6b401a80e23d51cb378cdac5d4eb342 + docs/models/jobsapiroutesbatchcancelbatchjobrequest.md: + id: 798cb1ca1385 + last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68 + pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62 + docs/models/jobsapiroutesbatchgetbatchjobrequest.md: + id: e83a7ec84f8a + last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0 + pristine_git_object: 8c259bea9bef11f779fd609f1212565d574457e2 + docs/models/jobsapiroutesbatchgetbatchjobsrequest.md: + id: 5b9c44ad4d31 + last_write_checksum: sha1:1d7c05337b7cfe68f85a36576d060e1a890f9f96 + pristine_git_object: 
5ceb0b2c40f079ffbe2cc4c82f6c3f94276980b4 + docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md: + id: 8eb8c127091e + last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e + pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e + docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md: + id: deff83b39b78 + last_write_checksum: sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14 + pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9 + docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md: + id: c45757ba1ed9 + last_write_checksum: sha1:4931469b58d454264f1e8d32df6a07d3f6f01022 + pristine_git_object: fb62eb62027c8151d597544fcaf27b972aeb78b3 + docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: + id: 8aa8030f26d7 + last_write_checksum: sha1:4aada0d2297479d8276f5a422cb4dd6b56b1e176 + pristine_git_object: 7b52e2ca6365f17ac3b19d128937783d87c7fa37 + docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: + id: a9b75762e534 + last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba + pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62 + docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md: + id: c0b31f4fc621 + last_write_checksum: sha1:4ceb9df28082bf5d496cd222a0f45dc81a576367 + pristine_git_object: f770532776a13860e697da7478d1677d16f0ec36 + docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: + id: 52078f097503 + last_write_checksum: sha1:fc134fdc7e229b8df373b77096c8299c214171a7 + pristine_git_object: 23c52c342358ea889b25ee7b18b381b68519c6cf + docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md: + id: 8545ffb587d6 + last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311 + pristine_git_object: 40d57686aec11d9bdc4c116ea4c98183e0a6414c + docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: + id: b4e2b814d8c3 + last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e + pristine_git_object: 
4429fe480ab9486de98940a119ac63f40045313b + docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md: + id: cfd848845787 + last_write_checksum: sha1:a165279fa0c9e051458ea4333dfdd31ef0440426 + pristine_git_object: 1a7e71d4479369f13c391a9782278557bc4531ae + docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md: + id: 75b5dd1bcbaa + last_write_checksum: sha1:dd30e7ff8748d26497458f3398c0547113dc058f + pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md: + id: 60bd2e28993a + last_write_checksum: sha1:58835c28cccaf90e99bbb72bf7c5a5ce42498824 + pristine_git_object: dbe49a86ca2bf64901133fd58a342d30909c35b2 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md: + id: c265a30fd4cf + last_write_checksum: sha1:410c62a884aae902cdfbfcab33779e62487de13b + pristine_git_object: f40350bf9d74d09ca3a2ec6d91d9068bda631ef5 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/legacyjobmetadata.md: + id: 50ac14d9b270 + last_write_checksum: sha1:ebe37a176ca318e797fee7ebf4eef73fb9938a12 + pristine_git_object: 4705ab4f67e10b8e2cbfc86b29c03a9945aeb8fb + docs/models/librariesdeletev1request.md: + id: c0c3b2e1aabc + last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294 + pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c + docs/models/librariesdocumentsdeletev1request.md: + id: 9d557bd7d1cc + last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337 + pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5 + docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md: + id: 27ad38ce4cb1 + last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816 + pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282 + docs/models/librariesdocumentsgetsignedurlv1request.md: + id: 4498715b6cfb + 
last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610 + pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f + docs/models/librariesdocumentsgetstatusv1request.md: + id: c2219d3a3738 + last_write_checksum: sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05 + pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d + docs/models/librariesdocumentsgettextcontentv1request.md: + id: 850dfa465952 + last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d + pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac + docs/models/librariesdocumentsgetv1request.md: + id: cdd0df2f7e9d + last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef + pristine_git_object: 6febc058425bb38857c391ee4c40d600858e6058 + docs/models/librariesdocumentslistv1request.md: + id: 7b5756e50d64 + last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f + pristine_git_object: 44f6300115853053214639982516a60b3268e778 + docs/models/librariesdocumentsreprocessv1request.md: + id: 1b8bf57b3f0a + last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3 + pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85 + docs/models/librariesdocumentsupdatev1request.md: + id: b9147b1c0e38 + last_write_checksum: sha1:ed3ae7761990bd26a4bf99cd4641822eb90d3d57 + pristine_git_object: d46308509330099e30a53dddad51da8a6186aa92 + docs/models/librariesdocumentsuploadv1request.md: + id: 89a89d889c72 + last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776 + pristine_git_object: 172a6183f31eec3142a84637414484799c2a4677 + docs/models/librariesgetv1request.md: + id: f47ad71ec7ca + last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac + pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 + docs/models/librariessharecreatev1request.md: + id: 99e7bb8f7fed + last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 + pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 + 
docs/models/librariessharedeletev1request.md: + id: bc8adba83f39 + last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf + pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd + docs/models/librariessharelistv1request.md: + id: 86e6f08565e2 + last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5 + pristine_git_object: 98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76 + docs/models/librariesupdatev1request.md: + id: f7e51b528406 + last_write_checksum: sha1:6a33b0161702ecc335dd2859df1bbc05b73702a9 + pristine_git_object: c5c142db7aaa49990135c21eabde43b8c0fdf756 + docs/models/library.md: + id: e8ec114dd107 + last_write_checksum: sha1:a4d6e9a777ce3d63aac24432281933ce6e13b4a9 + pristine_git_object: 4319f43df922b4924a11d494002826cb8d6dea0b + docs/models/listbatchjobsresponse.md: + id: e03025d58630 + last_write_checksum: sha1:de42c9396546fc8487d0bd6ed15b4076599fa83f + pristine_git_object: c23e32201d12a2594f97a493f63b2b7b42b9e337 + docs/models/listdocumentsresponse.md: + id: f2091cee0405 + last_write_checksum: sha1:335d0ccd3a448e65739d5a0cfa2c67614daec031 + pristine_git_object: 47b9d3b73fdc85bf6e463c91790faf346df56664 + docs/models/listfilesresponse.md: + id: b15df90d2d59 + last_write_checksum: sha1:4840f26427acf8846a9f1e48136f0663c6e4cd87 + pristine_git_object: 802f685fb3a76afb86a69cf41e6de9339cd6fbc7 + docs/models/listfinetuningjobsresponse.md: + id: d04e4dfddf78 + last_write_checksum: sha1:cebaf361aa10f1f6c4299c3c8a34f32d301455ad + pristine_git_object: 00251242023e2161747ebf00b4c2959909e2b654 + docs/models/listfinetuningjobsresponsedata.md: + id: 59c80de4086d + last_write_checksum: sha1:5a0d91c251b4b9283895d9f19f7b9416f93d4468 + pristine_git_object: adb0644475841c6a4686e8c42790dd44eed43dc1 + docs/models/listlibrariesresponse.md: + id: 87e3bec10745 + last_write_checksum: sha1:00522e685ec71a54f5f272d66b82e650848eaf36 + pristine_git_object: e21b9ced628f6fd5ae891d4a46666ebc94546859 + docs/models/listsharingout.md: + id: a3249129f37e + 
last_write_checksum: sha1:4831e4f02e1d5e86f138c7bb6b04d095aa4df30f + pristine_git_object: bcac4834f3bd008868435189f40bbf9e368da0d2 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messageentries.md: + id: 9af3a27b862b + last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc + pristine_git_object: 76256fb913376a15d5bcd2531b18f1a78b980c9d + docs/models/messageinputcontentchunks.md: + id: 34aac9c271db + last_write_checksum: sha1:d8ffdfd8b5458497e2cb6a32f52900c3ca2a6ddf + pristine_git_object: 0561785082c741f39f930ab7ded5b6c6a9ade6ad + docs/models/messageinputentry.md: + id: eb74af2b9341 + last_write_checksum: sha1:c91bfdf9426c51236b6ff33d127dbe62b051a9da + pristine_git_object: f8514fb3305dbe1df91db8d622cc33a753b63623 + docs/models/messageinputentrycontent.md: + id: 7e12c6be6913 + last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a + pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e + docs/models/messageoutputcontentchunks.md: + id: 802048198dc0 + last_write_checksum: sha1:8cf4e4ea6b6988e22c117d8f689bbfb0869816ad + pristine_git_object: c4a7777e7675ebf2384311ec82b2713da69e5900 + docs/models/messageoutputentry.md: + id: f969119c8134 + last_write_checksum: sha1:f50b955cd622a6160c0ada34b0e14bff612802b7 + pristine_git_object: 73a1c666acc913b96d65a124612c4a728882bbc9 + docs/models/messageoutputentrycontent.md: + id: 44019e6e5698 + last_write_checksum: sha1:d0cc7a8ebe649614c8763aaadbf03624bb9e47e3 + pristine_git_object: 5206e4eb0d95e10b46c91f9f26ae00407d2dd337 + docs/models/messageoutputevent.md: + id: b690693fa806 + last_write_checksum: sha1:a4157c087ff95fa9445757c9d363615718156164 + pristine_git_object: e09a965f7d4cc35d6b120ba5555d96ba7b3e8a27 + docs/models/messageoutputeventcontent.md: + id: cecea075d823 + last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 + pristine_git_object: 
16d8d52f6ff9f43798a94e96c5219314731ab5fb + docs/models/metric.md: + id: a812a3e37338 + last_write_checksum: sha1:14016848dcfaba90014b482634ed6d5715caa860 + pristine_git_object: 7f86303651650177ece51b82d867cab858e830ae + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 + docs/models/modelcapabilities.md: + id: 283fbc5fa32f + last_write_checksum: sha1:8a221e2334193907f84cf241ebaf6b86512bbd8b + pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca + docs/models/modelconversation.md: + id: 497521ee9bd6 + last_write_checksum: sha1:22a8d7502eeaf176fbd1c7b22b512b4f9e4e043f + pristine_git_object: af2e5c6149339a561b03b954cd0e71f9d9aeffd6 + docs/models/modelconversationtool.md: + id: 2dd28167bc36 + last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f + pristine_git_object: 8723556753d077969bc665a423c057ae4ceaa0d2 + docs/models/modellist.md: + id: ce07fd9ce413 + last_write_checksum: sha1:b4c22b5eff4478ffa5717bd5af92ca79f4a90b01 + pristine_git_object: 85b20be7376f80cf169c25b3c7117079cd4c2828 + docs/models/modellistdata.md: + id: e2eb639c646f + last_write_checksum: sha1:7394ba5645f990163c4d777ebbfc71f24c5d3a74 + pristine_git_object: b44e84a00d0c54f8df78650d45de0a409c901048 + docs/models/moderationobject.md: + id: 4e84364835f5 + last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e + pristine_git_object: 320b2ab4935f8751eb58794e8eb9e422de35ae7c + docs/models/moderationresponse.md: + id: e15cf12e553b + last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372 + pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20 + docs/models/multipartbodyparams.md: + id: f5be2d861921 + last_write_checksum: sha1:34e68e3795c7987138abd152177fa07198d2f6f6 + pristine_git_object: f14b95737fde09a120b35e2f922568ca31825bd5 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 + last_write_checksum: 
sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 + last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + last_write_checksum: sha1:091077fedf1b699d5160a21fe352056c247ef988 + pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:2faa819df648d330074c177d8f5d4a9c9a27bc90 + pristine_git_object: dd3fc2ea28cc2bc147473ba9f73aa32a9528632a + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrtableobject.md: + id: c967796380e6 + last_write_checksum: sha1:3b78858cc130fc8792ec3d149c8f657fd3f7a4c3 + pristine_git_object: 4e27697c15983f86274648b2d7bacac557081630 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/orderby.md: + id: 9e749ed80f72 + last_write_checksum: sha1:6ec002e3e59f37002ccb14e347b790ca4daef773 + pristine_git_object: bba50df10855a8d6acdf4b061ec2ffeb0279fd7f + docs/models/output.md: + id: 376633b966cd + last_write_checksum: sha1:600058f0b0f589d8688e9589762c45a0dd18cc9b + pristine_git_object: d0ee0db93f56c40f6684fcfdb5873aba586bc876 + docs/models/outputcontentchunks.md: + id: f7e175c8e002 + last_write_checksum: sha1:5adb0733a8ca9b224155dfef66dfb37b7f416972 + pristine_git_object: e5185014faa41b6e6d1567d713fc390f551fad01 + docs/models/paginationinfo.md: + id: 3d2b61cbbf88 + last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 + pristine_git_object: ad1fbb86c714c152a5e6e99d8a741e7346884e55 + docs/models/prediction.md: + id: 3c70b2262201 + 
last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/processingstatusout.md: + id: 83c8c59c1802 + last_write_checksum: sha1:7dbbfe790616ab4388e532bd78ffc1a5183b332d + pristine_git_object: bc40d3209c4c641dd7416c925b965c1bf7b73b1b + docs/models/processstatus.md: + id: "336054835357" + last_write_checksum: sha1:9b87de1980428307af6c29c2086c0e1f612ebd72 + pristine_git_object: 3a9c004e55cc31abb52d1f0bb450290465d42a1c + docs/models/realtimetranscriptionerror.md: + id: 4bc5e819565b + last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6 + pristine_git_object: e01f2126b3084eade47a26ea092556f7f61142c9 + docs/models/realtimetranscriptionerrordetail.md: + id: ea137b1051f1 + last_write_checksum: sha1:7e1d18760939d6087cda5fba54553141f8a78d1e + pristine_git_object: 5b34755dc67359bb884d5c2387608686ee527470 + docs/models/realtimetranscriptionerrordetailmessage.md: + id: d25137243bef + last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f + pristine_git_object: da3764ef56337bdc773eaf8e9aa747cbd1b407e2 + docs/models/realtimetranscriptioninputaudioappend.md: + id: fa2aa317d1ca + last_write_checksum: sha1:59cce0828505fdb55104cd3144b75334e0f31050 + pristine_git_object: 5ee365eb9a993933509ac4666bcec24bfcc6fccd + docs/models/realtimetranscriptioninputaudioend.md: + id: 11045f9cc039 + last_write_checksum: sha1:945ca0475826294e13aba409f3ae2c2fc49b1b67 + pristine_git_object: 393d208c6e242959161f4436d53cf4aa2df69a92 + docs/models/realtimetranscriptioninputaudioflush.md: + id: c2f2258e0746 + last_write_checksum: sha1:a4e6d160da44c6f57b01059f7198208702e9b06a + pristine_git_object: 367725baa278935a6a282338ca7f2a23895a86d8 + docs/models/realtimetranscriptionsession.md: + id: aeb0a0f87d6f + last_write_checksum: sha1:d72bf67442ac5e99f194c429e96a504685f02efb + pristine_git_object: 750bd7f79b65666812c6207d7085b9437c49517d + docs/models/realtimetranscriptionsessioncreated.md: + id: 
aa2ae26192d6 + last_write_checksum: sha1:d13fec916d05300c86b52e951e81b1ceee230634 + pristine_git_object: 34e603fd0a1cbc8007eef06decb158213faebeed + docs/models/realtimetranscriptionsessionupdated.md: + id: 56ce3ae7e208 + last_write_checksum: sha1:833db566b2c8a6839b43cb4e760f2af53a2d7f57 + pristine_git_object: 7e2719957aae390ee18b699e61fbc7581242942f + docs/models/realtimetranscriptionsessionupdatemessage.md: + id: 02a5eee40cdd + last_write_checksum: sha1:44f8e6bc8f8cd4087a7e86c85db5141fab90f78d + pristine_git_object: 2a50ca92720bad6605bdeafd83b43d0e8bf40615 + docs/models/realtimetranscriptionsessionupdatepayload.md: + id: 3ddd5a95510a + last_write_checksum: sha1:33bca4d547ca812d55ac49bf7b17851b2fecfc80 + pristine_git_object: d6c6547d7895e53be15a0cce46b6524178acc3bc + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab + docs/models/requestsource.md: + id: 8857ab6025c4 + last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf + pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b + docs/models/response.md: + id: 583c991c7a30 + last_write_checksum: sha1:0791cb4aa4045708ab64d42bf67bd6ab74bc7752 + pristine_git_object: ff67925758959b87992b47a1a32c224eeeb599e3 + docs/models/responsedoneevent.md: + id: 38c38c3c065b + last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f + pristine_git_object: 63d4cc06493e1ca12cf0e8ef800acfc0bdc9a02d + docs/models/responseerrorevent.md: + id: 3e868aa9958d + last_write_checksum: sha1:4711077bf182e4f3406dd12357da49d37d172b4c + pristine_git_object: 4309bdadc323918900cc4ca4fddb18788361d648 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: 
sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/responseretrievemodelv1modelsmodelidget.md: + id: 6143ec73bdd6 + last_write_checksum: sha1:6bae62cbb18559065a53f0acdacb1f72f513467e + pristine_git_object: ffbc1473d39c8266bb6b05b37677c98ca1d10858 + docs/models/responsestartedevent.md: + id: 88e3b9f0aa8d + last_write_checksum: sha1:156f38bbe8278f9c03117135938e7cbdae3038b9 + pristine_git_object: e2f421af866690b34c2d9fa4595a63e9172a65f5 + docs/models/responsev1conversationsget.md: + id: 48d4a45780a9 + last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e + pristine_git_object: 844c5d610a9a351532d12b1a73f6c660059da76b + docs/models/retrievemodelv1modelsmodelidgetrequest.md: + id: ac567924689c + last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 + pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:c7ef39a81299f3156b701420ef634a8b4fab76f0 + pristine_git_object: 853c6257d9bdb4eda9cb37e677d35ab477dca812 + docs/models/sampletype.md: + id: 0e09775cd9d3 + last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a + pristine_git_object: 34a6a012b1daeeb22626417650269e9376cc9170 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:45b7b8881a6560a468153662d61b99605a492edf + pristine_git_object: 2e0839d06f821dd97780dc22f202dedf23e4efe1 + docs/models/shareenum.md: + id: 53a713500576 + last_write_checksum: sha1:9d45d4bd272e6c146c3a8a21fd759acf2ae22148 + pristine_git_object: dc5d2b68a810c2983b5a47fbff747dfc2cc17598 + docs/models/sharingdelete.md: + id: 165cac179416 + last_write_checksum: sha1:1a0b3c95f4b56173510e234d7a76df85c593f360 + pristine_git_object: 1dcec0950c7fcd264ea9369c24244b54ba2bcfbf + docs/models/sharingin.md: + id: 08d396ee70ad + last_write_checksum: sha1:662edfc07a007e94fe1e54a07cf89d7c83c08df5 + pristine_git_object: bac18c8d43f801e8b5cf5b3cd089f9da0ee2281a + 
docs/models/sharingout.md: + id: 5db4547c7c56 + last_write_checksum: sha1:bd15c318d1a3f5bee7d7104d34cbd8ba6233bbb8 + pristine_git_object: 35aeff43593f3c9067c22a2f8b1468d7faa5af34 + docs/models/source.md: + id: 6541ef7b41e7 + last_write_checksum: sha1:d0015be42fe759d818ebd75b0cec9f83535a3b89 + pristine_git_object: bb1ed6124647b02c4350123bf257b0bf17fc38fd + docs/models/ssetypes.md: + id: 6a902241137c + last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 + pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/tableformat.md: + id: d8cd08c55c3c + last_write_checksum: sha1:e0736ea9576466d71821aa1e67fc632cc5a85414 + pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:d9fe94c670c5e0578212752c11a0c405a9da8518 + pristine_git_object: df0e61c32bc93ef17dbba50d026edace139fee6a + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:0f861f1653035dea2018be9a977c15f54add9531 + pristine_git_object: 70c0369f16465e1b1f5f46e8cd799e5db536cdde + docs/models/thinkchunkthinking.md: + id: 22de7b5060fb + last_write_checksum: sha1:5e0722b8d513b38d60fbfe28635bdea40b951593 + pristine_git_object: dd1ecca12b5cda76a51b1e13335f1757a9dd7a68 + docs/models/timestampgranularity.md: + id: eb4d5a8e6f08 + last_write_checksum: sha1:e256a5e8c6010d500841295b89d88d0eface3b88 + pristine_git_object: 
d20012ea9925446c16c9162304642ba48391d34d + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolcallconfirmation.md: + id: 944eebb142ff + last_write_checksum: sha1:864ccb39a00094d965b764235e74709945abca3d + pristine_git_object: 1812f7d687d83f5692d9e79709e56813ab2c79b1 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolconfiguration.md: + id: 06bfa2c4e662 + last_write_checksum: sha1:9b619977375f228c76f09d48d6e2833add6c07e2 + pristine_git_object: 89286a172124ce3473bcb081de6e4db8c95afefa + docs/models/toolexecutiondeltaevent.md: + id: f2fc876ef7c6 + last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e + pristine_git_object: 7066f3485407707500e5006335279bfa37db8705 + docs/models/toolexecutiondeltaeventname.md: + id: 93fd3a3b669d + last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc + pristine_git_object: 9c3edef8c0698d7293a71ee56410a0ed67fd1924 + docs/models/toolexecutiondoneevent.md: + id: b604a4ca5876 + last_write_checksum: sha1:6b6975ded0b0495b6c56250d153186c7818b5958 + pristine_git_object: b2d81be3cfa3e1dd0d1a58ef5ad16556c5e953c7 + docs/models/toolexecutiondoneeventname.md: + id: d19dc0060655 + last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6 + pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8 + docs/models/toolexecutionentry.md: + id: 75a7560ab96e + 
last_write_checksum: sha1:668d8fbc59bc729bf4b1d95d2f2bfe4097701c0e + pristine_git_object: 03316381b130cf02751b10fef4129c8f23072b76 + docs/models/toolexecutionentryname.md: + id: 86d537762559 + last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845 + pristine_git_object: fb762a5382d8b0e93dc2eb277f18adf810057c55 + docs/models/toolexecutionstartedevent.md: + id: 37657383654d + last_write_checksum: sha1:5a020d24bdeb4eb9976ce93a8daa91947026bde9 + pristine_git_object: 189b8a3d3b22d73000850a3f1a95b85e358c2090 + docs/models/toolexecutionstartedeventname.md: + id: be6b33417678 + last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 + pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a + docs/models/toolfilechunk.md: + id: 67347e2bef90 + last_write_checksum: sha1:2e4c6ce703733c02e62467507c231033716fdb92 + pristine_git_object: d60021755729f1a2870e24a500b3220c8f1fc6e3 + docs/models/toolfilechunktool.md: + id: eafe1cfd7437 + last_write_checksum: sha1:73a31dbff0851612f1e03d8fac3dbbee77af2df0 + pristine_git_object: aa5ac8a99a33d8c511f3d08de93e693bf75fb2a1 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolreferencechunk.md: + id: 10414b39b7b3 + last_write_checksum: sha1:ea3bdfc83177c6b7183ad51fddb2d15aee0f0729 + pristine_git_object: 49ea4ca7b05e5fcaaf914f781e3a28483199d82d + docs/models/toolreferencechunktool.md: + id: c2210d74792a + last_write_checksum: sha1:368add3ac6df876bc85bb4968de840ac578ae623 + pristine_git_object: 999f7c34885015a687c4213d067b144f1585c946 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 
84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/trainingfile.md: + id: 4039958e8930 + last_write_checksum: sha1:d02543c2d1446e56501f2ac358a09669b0077648 + pristine_git_object: cde218bb2281a1274d013844ad76b4b2a34b986c + docs/models/transcriptionresponse.md: + id: 39e2354aca38 + last_write_checksum: sha1:7b32e2179c3efc675c05bba322cc33554a9ff9db + pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 + docs/models/transcriptionsegmentchunk.md: + id: f09db8b2273e + last_write_checksum: sha1:d4a7ebd6a8cc512a0bd00a49af4130c533254b44 + pristine_git_object: d7672c0eebb55243965306c94a771aa18ed641d6 + docs/models/transcriptionstreamdone.md: + id: 2253923d93cf + last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4 + pristine_git_object: bca69a2b02e069ce240342d76ac408aec67993a9 + docs/models/transcriptionstreamevents.md: + id: d0f4eedfa2b6 + last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b + pristine_git_object: f760385dfbd9779e63d61ec6357901bc9b4ca8e9 + docs/models/transcriptionstreameventsdata.md: + id: 506af75a0708 + last_write_checksum: sha1:99fcb3bf3aab0fb87dc02a4e6ccef9271ff0ae89 + pristine_git_object: eea8e9281634c56517e28f613afee38e0b0846ad + docs/models/transcriptionstreameventtypes.md: + id: 701782e8a63d + last_write_checksum: sha1:ff79dfb5d942c807b03c9e329a254bfa95b99a16 + pristine_git_object: e4eb25a6400dcc5a48b5eb5f65e96f7be91fa761 + docs/models/transcriptionstreamlanguage.md: + id: 5e9df200153c + last_write_checksum: sha1:d5626a53dde8d6736bab75f35cee4d6666a6b795 + pristine_git_object: 63fcfbc63a65cdff4228601e8a46f9d003ec9210 + docs/models/transcriptionstreamsegmentdelta.md: + id: f59c3fb696f2 + last_write_checksum: sha1:7d6999abf5a01fc01c0d5302acd3218e535adc9a + pristine_git_object: 1b652a3b6dc4406a3b7efa8a412b15ca0a5d765f + docs/models/transcriptionstreamtextdelta.md: + id: 69a13554b554 + last_write_checksum: sha1:d969f462034ed356f2c8713b601ee7d873d4ce07 + pristine_git_object: 
77bd0ddcf8a1d95707fa9e041de3a47bb9e7f56d + docs/models/unarchivemodelresponse.md: + id: a690f43df567 + last_write_checksum: sha1:5c9d4b78c92d30bb4835cb724d1ea22a19bf5327 + pristine_git_object: 375962a7110f814288ea9f72323383bd8194e843 + docs/models/updateagentrequest.md: + id: 371bfedd9f89 + last_write_checksum: sha1:97170995ed40391023f0dce5096cfebe83fa7dc8 + pristine_git_object: d3428d92a8f23670a6b587a6017a353d2c12a815 + docs/models/updateagentrequesttool.md: + id: bdf961d2c886 + last_write_checksum: sha1:5355f8c97b2aef98aebff251e1f4830ddbaa7881 + pristine_git_object: e358b1edb9035667104700dde890bb0b43074543 + docs/models/updatedocumentrequest.md: + id: ee4e094a6aa7 + last_write_checksum: sha1:4c4d774c67449402eb7e1476b9d0fef5b63f2b99 + pristine_git_object: 7e0b41b7be9f559b27a3430f46ed53d0453f6e03 + docs/models/updatelibraryrequest.md: + id: 2eda82f12f31 + last_write_checksum: sha1:436e08988daa8ca04ece36a4790ed84e0629b81a + pristine_git_object: aaffc5a9f0d588ff935db2ec2c079af9f162c2c3 + docs/models/updatemodelrequest.md: + id: 8eabdced3e0e + last_write_checksum: sha1:96879df11c005b591f2e59975897feff8fc8656e + pristine_git_object: 56b84c59c48ac135345394235c71ce77d384e33e + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + 
last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + docs/models/wandbintegration.md: + id: ba1f7fe1b1a3 + last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf + pristine_git_object: c73952d9e79ea8e08bc1c17817e74e3650def956 + docs/models/wandbintegrationresult.md: + id: 729c2601b338 + last_write_checksum: sha1:49f442907815de4661a85a3521803d80b953a17e + pristine_git_object: d12bc31191ba534a9744d78f657c19e7f93f777a + docs/models/websearchpremiumtool.md: + id: 267988aa8c3f + last_write_checksum: sha1:38f80a43f73a13ddedc7730f853c092a48b665f9 + pristine_git_object: 78b736cd314617caa0d77f3c42015212e37ab539 + docs/models/websearchtool.md: + id: fc4df52fb9b5 + last_write_checksum: sha1:72636dc7ae74264bb5158d284ef6f83da5290b27 + pristine_git_object: 4ca7333c412ad819e3e02c61debe402e3f9b0af9 + docs/sdks/accesses/README.md: + id: 2ea167c2eff2 + last_write_checksum: sha1:279d3b3a4f625b89b25e9a2a47886ac6008b3ca0 + pristine_git_object: c50456df9ea2bb71f78a83ad28f90e089d2e2cd7 + docs/sdks/agents/README.md: + id: 5965d8232fd8 + last_write_checksum: sha1:a73ae6719acef32b47be55ea5c5684e91f7eda68 + pristine_git_object: 8a60837030b9e5dd0adca0d07d9f0266158b080f + docs/sdks/batchjobs/README.md: + id: a3b8043c6336 + last_write_checksum: sha1:b4b3123ff210545048e2b0c729f2b7e5f7460f4e + pristine_git_object: 3633fe4ee136c1ac90f9446425f62a0d68fa4f90 + docs/sdks/betaagents/README.md: + id: 5df79b1612d8 + last_write_checksum: sha1:9ec1c7a967bc653fe175a7986ddec74d5feb0714 + pristine_git_object: aaa5110e6db30f5450877b67d70d46e53b98996b + docs/sdks/chat/README.md: + id: 393193527c2c + last_write_checksum: sha1:5e7a43def5636140d70a7c781ed417e527ce9819 + pristine_git_object: 1bf4aeadc762f5d696c278eefaa759f35993e9d5 + docs/sdks/classifiers/README.md: + id: 74eb09b8d620 + last_write_checksum: sha1:9f11740f8cf1a3af44fff15b63916305f1882505 + pristine_git_object: dc0f4984380b5b137266421e87a1505af5260e89 + 
docs/sdks/conversations/README.md: + id: e22a9d2c5424 + last_write_checksum: sha1:4c5f8ea93d560956cb23c26e0d5f6d7cbc129e07 + pristine_git_object: e77d329b735dc21f620470bcf82220a79bc34e18 + docs/sdks/documents/README.md: + id: 9758e88a0a9d + last_write_checksum: sha1:ac7ab2598066971e8b371a3e73aa266ec697df1b + pristine_git_object: 9c219b6709d5d5bfa28113efca92012e8c5a5112 + docs/sdks/embeddings/README.md: + id: 15b5b04486c1 + last_write_checksum: sha1:4a279bf9bcd84a9878ef979c78b8b75af3d52f02 + pristine_git_object: cb207d8be2ca86b00dc797fc06eabd1498adb770 + docs/sdks/files/README.md: + id: e576d7a117f0 + last_write_checksum: sha1:f5861c42227b901742fd8afe7155ed6d634b1b4c + pristine_git_object: 9507326be83eaf750daa12c0b1421d819b72340d + docs/sdks/fim/README.md: + id: 499b227bf6ca + last_write_checksum: sha1:5b2ce811df8d867d14fe0126f2c9619cca779f56 + pristine_git_object: 49151bf5be49ce6554679bc5c30906894a290ecb + docs/sdks/finetuningjobs/README.md: + id: 03d609f6ebdd + last_write_checksum: sha1:2d7ff255c1462d5f1dff617a1993e730ec3911ea + pristine_git_object: 4262b3a9833180ce86da43a26ee7ab27403f2cd0 + docs/sdks/libraries/README.md: + id: df9a982905a3 + last_write_checksum: sha1:e3eb0e9efb3f758fdf830aa1752c942d59a4f72b + pristine_git_object: 7df1ef4e26449af572412f052ee7ad189039544f + docs/sdks/models/README.md: + id: b35bdf4bc7ed + last_write_checksum: sha1:2aa91ffe637c049aed0d63d24ac39688b6ecb270 + pristine_git_object: 311a2db6e213902ac5a2c27acf19f856dae2c264 + docs/sdks/ocr/README.md: + id: 545e35d2613e + last_write_checksum: sha1:da377d75b6b7480c335d7f721bb06fe11492be38 + pristine_git_object: fde2a82339e10c74aca6d1b4168b62501d7bbf83 + docs/sdks/transcriptions/README.md: + id: 089cf94ecf47 + last_write_checksum: sha1:15d118796f147bc5b0bf4146ba39bfa9edfbc996 + pristine_git_object: 97703c9b4dc942385ee04ae96cbd100c3f632a17 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 
3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/client/__init__.py: + id: f1b791f9d2a5 + last_write_checksum: sha1:c05dc9845d3361c4aae7796b079ac0e7952e8606 + pristine_git_object: 4b79610a3fc8222fc8f9adeeaf798e894708fc06 + src/mistralai/client/_hooks/__init__.py: + id: cef9ff97efd7 + last_write_checksum: sha1:9a6f060871150610f890cc97676c3afe9050b523 + pristine_git_object: 66a04e3727ffcc2c427d854cdbb4f5f340af050f + src/mistralai/client/_hooks/sdkhooks.py: + id: ed1e485b2153 + last_write_checksum: sha1:e592d5ab277827b988257b4df3e746508ca91b23 + pristine_git_object: ecf94240a5689c8b248add46509bc7a7982d8437 + src/mistralai/client/_hooks/types.py: + id: 85cfedfb7582 + last_write_checksum: sha1:40294e852f818a974034c33e510e0f8723fcaf31 + pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c + src/mistralai/client/_version.py: + id: cc807b30de19 + last_write_checksum: sha1:a48c2cc43ac028eb2e31a129a9551ad2fda3d33f + pristine_git_object: 805648e42e48831658907f664d6536e8bdcd98c0 + src/mistralai/client/accesses.py: + id: 76fc53bfcf59 + last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e + pristine_git_object: 0761b0bc6080ab0d891be70089a1908d435559fa + src/mistralai/client/agents.py: + id: e946546e3eaa + last_write_checksum: sha1:7049cab7c308888c88b0341fb29f0132e154e3cb + pristine_git_object: 2b70d1520663d999773159d89b1f9dc96f7fbf97 + src/mistralai/client/audio.py: + id: 7a8ed2e90d61 + last_write_checksum: sha1:e202d775d24c0303053e0548af83fcb04e2748f4 + pristine_git_object: f68f063c08a099d07904456daa76d8e2d2ecdbe6 + src/mistralai/client/basesdk.py: + id: 7518c67b81ea + last_write_checksum: sha1:2cea76931db51175b2c787d0c707f08e9944c22f + pristine_git_object: a976121bd224d64497e5006cb58dd728f6a67144 + src/mistralai/client/batch.py: + id: cffe114c7ac7 + last_write_checksum: 
sha1:b452983f67b33f26e1faa60fdbbb171cb1877224 + pristine_git_object: 7e36fd0d73ebeb873f74f4109896a6cf3bb7d2ba + src/mistralai/client/batch_jobs.py: + id: 3423fec25840 + last_write_checksum: sha1:34de0e986e7c0e4377f70125d319e522280c565f + pristine_git_object: 0e135b30cd122d1a813ee67bf2f9037953448e73 + src/mistralai/client/beta.py: + id: 981417f45147 + last_write_checksum: sha1:85f42fc6c2318eef94c90405b985120220c9c617 + pristine_git_object: 65b761d18f7274cc33162a83efa5b33211f78952 + src/mistralai/client/beta_agents.py: + id: b64ad29b7174 + last_write_checksum: sha1:7c900a6b1483108a367050440667c069b08fbb92 + pristine_git_object: 157c5de4c66273e6df468f8a12b4399f9efb32fb + src/mistralai/client/chat.py: + id: 7eba0f088d47 + last_write_checksum: sha1:520b0da011d63c60bd0d3a960a410a8f4a6a3e22 + pristine_git_object: 13b9c01f035c4fd6f60b78f20a1801bedf3b582b + src/mistralai/client/classifiers.py: + id: 26e773725732 + last_write_checksum: sha1:ee94a4e50cda893f9c19c2304adda8b23fc2de9e + pristine_git_object: 67199b601e38dff6fc6a4317eb845fbde6c25de0 + src/mistralai/client/conversations.py: + id: 40692a878064 + last_write_checksum: sha1:1101b9e374010ba9cb080c30789672cfcfc45c55 + pristine_git_object: ec33b1fb12d1923ef5f686ed09c5fe5ae889e40c + src/mistralai/client/documents.py: + id: bcc17286c31c + last_write_checksum: sha1:37669f51eba1b352a5e3c7f3a17d79c27c7ea772 + pristine_git_object: b3130364c0f3cc90ed1e4407a070bd99e3cce606 + src/mistralai/client/embeddings.py: + id: f9c17258207e + last_write_checksum: sha1:d1610bf12dba8b2f8cb27d2f0aa592594dfe6b3a + pristine_git_object: 5d55ffc43c0c98d46e04b238ab23a08d1b9e6a6a + src/mistralai/client/errors/__init__.py: + id: 0b2db51246df + last_write_checksum: sha1:0befddc505c9c47388683126750c7ad0e3fbef52 + pristine_git_object: 58a591a1cc2896f26df2075ffca378ca6c982d1e + src/mistralai/client/errors/httpvalidationerror.py: + id: ac3de4a52bb6 + last_write_checksum: sha1:73251adb99a07d11b56d0bc0399a2362ff9ccdba + pristine_git_object: 
97b165629c39ab6e24406eb3f13970414b73f8f7 + src/mistralai/client/errors/mistralerror.py: + id: d1f57f0ff1e9 + last_write_checksum: sha1:30065cdd7003ec02cb3463d7c63229c4ff97503c + pristine_git_object: eb73040c5b5251018695204fde80eac914b35dae + src/mistralai/client/errors/no_response_error.py: + id: 8b469ecb0906 + last_write_checksum: sha1:0b3fdb1136472c41a4a739a5cbf9e2a4ce0c63a4 + pristine_git_object: d71dfa7b24146f1390ac6830e61acf337b99ca83 + src/mistralai/client/errors/responsevalidationerror.py: + id: 6cfaa3147abe + last_write_checksum: sha1:6862d178d4d1964bc03db47b76709aa406546981 + pristine_git_object: a7b3b9f0207846b5f176076b9f400e95cb08ebb9 + src/mistralai/client/errors/sdkerror.py: + id: c489ffe1e9ca + last_write_checksum: sha1:f708168e46c2960dd51896083aee75ccdb36f9dd + pristine_git_object: 25b87255a51021079f8ba5cc60b43509e12f9a4d + src/mistralai/client/files.py: + id: f12df4b2ce43 + last_write_checksum: sha1:a16c8702d15339200b09c62948c06f79e720d79c + pristine_git_object: a5f3adf6dd9b60a202c70edf7d2a148a626ce471 + src/mistralai/client/fim.py: + id: 217bea5d701d + last_write_checksum: sha1:dc427c9e954dfb9a7fe2df8b5c544877a28cdc73 + pristine_git_object: 8ffb7730a03398322dfdd6c83724096d4924c5c5 + src/mistralai/client/fine_tuning.py: + id: 5d5079bbd54e + last_write_checksum: sha1:fe1f774df4436cc9c2e54ed01a48db573eb813cd + pristine_git_object: df6bc5643a13294ddfbeecc6ae84d00cd7199bed + src/mistralai/client/fine_tuning_jobs.py: + id: fa1ea246e0b2 + last_write_checksum: sha1:8cbf3827f5c2e43170192de39be498af0bf24cf0 + pristine_git_object: c2ee871bb1ccf7e3e24081121a7e54f1483eee5c + src/mistralai/client/httpclient.py: + id: 3e46bde74327 + last_write_checksum: sha1:0f4ecc805be1dc3d6e0ca090f0feb7d988f6eb9d + pristine_git_object: 544af7f87d6b7097935290bebd08e30e5f485672 + src/mistralai/client/libraries.py: + id: d43a5f78045f + last_write_checksum: sha1:6440b3df71fe557ecba5c23768d115efd4ceb26f + pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340 + 
src/mistralai/client/models/__init__.py: + id: e0e8dad92725 + last_write_checksum: sha1:0ac0c956f0f87979e871a00c32884ee3102b6d2b + pristine_git_object: 7d2dfd970d48d54d798f1661206abdc697134434 + src/mistralai/client/models/agent.py: + id: 1336849c84fb + last_write_checksum: sha1:6090ddf2b5b40656dfbf3325f1022a40ae418948 + pristine_git_object: 686a6eb84ecd27e725e3773b3f7773dddac1c10c + src/mistralai/client/models/agentaliasresponse.py: + id: 3899a98a55dd + last_write_checksum: sha1:d7e12ea05431361ad0219f5c8dee11273cd60397 + pristine_git_object: 6972af2a4ae846e63d2c70b733ecd6c8370ee0cd + src/mistralai/client/models/agentconversation.py: + id: 1b7d73eddf51 + last_write_checksum: sha1:28718fb00dbe74241712b4f7a3fbce2d060f7e86 + pristine_git_object: da30c6634294cdaba459b68ca8877d867ee052fb + src/mistralai/client/models/agenthandoffdoneevent.py: + id: 82628bb5fcea + last_write_checksum: sha1:829c5a152e6d737ffd65a3b88b0b2890e6703764 + pristine_git_object: e2609e3d1fb62b132eb53112eb2bdc4ae855085f + src/mistralai/client/models/agenthandoffentry.py: + id: 5030bcaa3a07 + last_write_checksum: sha1:c9544755ad6d3a3831f8afe446c6a9a523eb5137 + pristine_git_object: f92ef2cc7310d5df94436f3067a640d3848405f0 + src/mistralai/client/models/agenthandoffstartedevent.py: + id: 2f6093d9b222 + last_write_checksum: sha1:c9f86e01497c53f3c1806dbb9fdff6e2d9993323 + pristine_git_object: 2a4023419212fec8b3f0e83d506a25b17408a8b1 + src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py: + id: 23a832f8f175 + last_write_checksum: sha1:237d6b4419615c9c26f96d49760732bd7b4617e7 + pristine_git_object: 04761ae786c35e6fa6cd5a896a5e52458cb3a5d5 + src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py: + id: 9c9947e768d3 + last_write_checksum: sha1:385faebecef8479d1a72a7ab6f15ddcc611dad87 + pristine_git_object: 291a9802a7d49108fc0d428610cb4c37b42f0796 + src/mistralai/client/models/agents_api_v1_agents_deleteop.py: + id: 95adb6768908 + last_write_checksum: 
sha1:f222a61a73ba2f37051fffbf2d19b3b81197d998 + pristine_git_object: 5e41fdcdbf182e993acd71603ecb8c9a14e48043 + src/mistralai/client/models/agents_api_v1_agents_get_versionop.py: + id: ef9914284afb + last_write_checksum: sha1:c99ee098f659a56cb365c280cc29de441916b48a + pristine_git_object: 941863d0f8143020200bb5566ce66d527c4369c8 + src/mistralai/client/models/agents_api_v1_agents_getop.py: + id: f5918c34f1c7 + last_write_checksum: sha1:b90285965e2aaccaf989e59b8f1db4a53ae8b31c + pristine_git_object: dd17580dd0041a979fc6c9c7349d14a3e200f5d3 + src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py: + id: a04815e6c798 + last_write_checksum: sha1:b4b5c4e8566f1d0c68a14aba94b7ffea257fd7ce + pristine_git_object: bb1da6020386fabfbd606db9a098a0e9323ce3b0 + src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py: + id: 19e3310c3907 + last_write_checksum: sha1:6628e9ff747c579e11fa9a756cee3b11c57c476d + pristine_git_object: 54b62e90e23c1782a0b068460d6877cac3b28916 + src/mistralai/client/models/agents_api_v1_agents_listop.py: + id: 25a6460a6e19 + last_write_checksum: sha1:0abe889b85470b28917368a2b958a13303bd38f1 + pristine_git_object: 97b1c7f1a070be5e12e1a32ad56dbcfcb0f1cd68 + src/mistralai/client/models/agents_api_v1_agents_update_versionop.py: + id: 63f61b8891bf + last_write_checksum: sha1:e9046cf75e008e856f00dda8725cbb16d83cd394 + pristine_git_object: 5ab821ea413d656dc7194f3588c8987c3e720831 + src/mistralai/client/models/agents_api_v1_agents_updateop.py: + id: bb55993c932d + last_write_checksum: sha1:bc922e15651d7bb33b841d9b3ae247843b6a5426 + pristine_git_object: 69da5001007916e458cab6caf8c10073c8fbc7d6 + src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py: + id: ec00e0905f15 + last_write_checksum: sha1:d0a253c2f383241378e6fab35a38427d0a1dd827 + pristine_git_object: d257dc789cdc4f57bb91d1788335d2d49442d02f + src/mistralai/client/models/agents_api_v1_conversations_appendop.py: + id: 39c6125e850c + last_write_checksum: 
sha1:864ece4ddcd65075547daa1ab996ba7cfe9939fc + pristine_git_object: 61fec0834e6e05a56a7ee5c984fb0401f9c72f5c + src/mistralai/client/models/agents_api_v1_conversations_deleteop.py: + id: 0792e6abbdcb + last_write_checksum: sha1:9725fce86a52b4995a51e1995ca114c0c4b414df + pristine_git_object: 499645a77782e29db61e439060340fee787799c1 + src/mistralai/client/models/agents_api_v1_conversations_getop.py: + id: c530f2fc64d0 + last_write_checksum: sha1:241e5a07f37fa88f1e5011615b3e2b47a1aaf6a7 + pristine_git_object: 504616abbf0c9d0595f2aae81c59e52352cee323 + src/mistralai/client/models/agents_api_v1_conversations_historyop.py: + id: 2f5ca33768aa + last_write_checksum: sha1:fccc3e1a3f48eff31463829037a440be667a7da1 + pristine_git_object: ef0a4eb084de52d4bde435ee9751aaa12e61dcc3 + src/mistralai/client/models/agents_api_v1_conversations_listop.py: + id: 936e36181d36 + last_write_checksum: sha1:e3e52cf7967b9b78099db9449cb33e3ded34d111 + pristine_git_object: 8bf66aea23f16734c1f9e03629aaf7246e4e60b4 + src/mistralai/client/models/agents_api_v1_conversations_messagesop.py: + id: b5141764a708 + last_write_checksum: sha1:17fd503da7fb20198792c6e25f94dcc0a1e5db05 + pristine_git_object: 19978a194e2dd633fe89bcee7ceac177fcdd6629 + src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py: + id: c284a1711148 + last_write_checksum: sha1:2e462249c8ab71376c5f6179a2c033e254165f3e + pristine_git_object: 63c744498dfbdd254f2e780d90a680b10100ee63 + src/mistralai/client/models/agents_api_v1_conversations_restartop.py: + id: 3ba234e5a8fc + last_write_checksum: sha1:5dd06d300dbe8832b72d868657dc4c58f0ebaad5 + pristine_git_object: 3186d5df9000d4a62c0fbc64a601e6b709803deb + src/mistralai/client/models/agentscompletionrequest.py: + id: 3960bc4c545f + last_write_checksum: sha1:5d81a0421184ed547208e8ea7cff47b18fc00788 + pristine_git_object: 6955f6acb023fd842d9ec46a694d270a66911c0e + src/mistralai/client/models/agentscompletionstreamrequest.py: + id: 1b73f90befc2 + last_write_checksum: 
sha1:b46298a653359bca205b6b1975bcd1909e563dff + pristine_git_object: c2cf35522236f29ca1b9f2a438dfc79a59ca3e2a + src/mistralai/client/models/apiendpoint.py: + id: 00b34ce0a24d + last_write_checksum: sha1:733e852bf75956acd2c72a23443627abfa090b7b + pristine_git_object: a6665c1076f05c28936510c24ee7d3498d7e7a24 + src/mistralai/client/models/archivemodelresponse.py: + id: 2d22c644df64 + last_write_checksum: sha1:d0f67fd2bc5a6e8de4f2b0a5742ceb4a1f7f5ab8 + pristine_git_object: f1116850c8bf0159c4146d4973988ea5d0fe7de7 + src/mistralai/client/models/assistantmessage.py: + id: 2b49546e0742 + last_write_checksum: sha1:dcfa31c2aac95a0d7bd748e96bd87a5c85c0d1f6 + pristine_git_object: 26a778c70439d21b890e85f2c85dbb560e8bffef + src/mistralai/client/models/audiochunk.py: + id: ce5dce4dced2 + last_write_checksum: sha1:d3c2e28583d661a9583c40c237430a1f63ea7631 + pristine_git_object: 68866cd2c3c640cf56258f2f98b8a2385ea6fcdb + src/mistralai/client/models/audioencoding.py: + id: b14e6a50f730 + last_write_checksum: sha1:92ca06dce513cd39b2c7d9e5848cf426b40598ce + pristine_git_object: 67fec75d72845b8dda774c96767a0b233f269fe5 + src/mistralai/client/models/audioformat.py: + id: c8655712c218 + last_write_checksum: sha1:8ee7b564d106b601b6ad8a9321c27dfff421ce5e + pristine_git_object: fef87ae76b31128ebd5ced4278e274c249181c23 + src/mistralai/client/models/audiotranscriptionrequest.py: + id: e4148b4d23e7 + last_write_checksum: sha1:a6ef85be4ae24aa79c8c3fa9dcaf055e0ba9b266 + pristine_git_object: fe4c79e3427fae3e022bd936236d2934eaa76b60 + src/mistralai/client/models/audiotranscriptionrequeststream.py: + id: 33a07317a3b3 + last_write_checksum: sha1:6e648ff58a70a0a3bd63a24676122b80eba4baf7 + pristine_git_object: 2d1e9269b51d84cd8b21643fe04accd00839b013 + src/mistralai/client/models/basemodelcard.py: + id: 556ebdc33276 + last_write_checksum: sha1:e2c3d1effee5b434fea9b958c0dd54fa96143924 + pristine_git_object: 9c9e9a2045a10f4606f11ee5886a19ccf03bbf0e + src/mistralai/client/models/batcherror.py: + id: 
1563e2a576ec + last_write_checksum: sha1:51c9e9a4d306c2de45dc0879ade62daed3fc2972 + pristine_git_object: 8a353cd2dc06a8c6f2db3d6b613cfdca8278f57e + src/mistralai/client/models/batchjob.py: + id: 85cd28932cc7 + last_write_checksum: sha1:532a8c6ca8546052159e5e5174cf65ce17a62f3f + pristine_git_object: 80acac336883c23b621d0dc647fef20548bf061a + src/mistralai/client/models/batchjobstatus.py: + id: 61e08cf5eea9 + last_write_checksum: sha1:78934183519948464385245cbc89efb68ac00bfb + pristine_git_object: bd77faa2fbed74b19a8d3884af6d43bc1b4806e0 + src/mistralai/client/models/batchrequest.py: + id: 6f36819eeb46 + last_write_checksum: sha1:b2a71163e37a9483e172dc13b6320749bee38f2f + pristine_git_object: 911a9a0554b9b8cb6dedcb3a86a06c39890b875e + src/mistralai/client/models/builtinconnectors.py: + id: 2d276ce938dc + last_write_checksum: sha1:4ceb3182009b6535c07d652ccf46661b553b6272 + pristine_git_object: ecf60d3c1a83028d9cf755d4c9d5459f6b56e72a + src/mistralai/client/models/chatclassificationrequest.py: + id: afd9cdc71834 + last_write_checksum: sha1:a29088359142ebd6409f45569168b2096014119e + pristine_git_object: cf2aa78af3ffc747d557422b83551075b83e601d + src/mistralai/client/models/chatcompletionchoice.py: + id: 7e6a512f6a04 + last_write_checksum: sha1:de0281a258140f081012b303e3c14e0b42acdf63 + pristine_git_object: 2c515f6e9a290ebab43bae41e07493e4b99afe8f + src/mistralai/client/models/chatcompletionrequest.py: + id: 9979805d8c38 + last_write_checksum: sha1:1f0390718ab06126a05e06797ef6af310ccab543 + pristine_git_object: e871bd92733ac400fdfeb2cf4f66fc32a7584103 + src/mistralai/client/models/chatcompletionresponse.py: + id: 669d996b8e82 + last_write_checksum: sha1:97f164fea881127ac82303e637b6a270e200ac5b + pristine_git_object: 7092bbc18425091d111ec998b33edc009ff0931b + src/mistralai/client/models/chatcompletionstreamrequest.py: + id: 18cb2b2415d4 + last_write_checksum: sha1:c197792ed1dd78159ab0b970f8f76087ff2c4d6b + pristine_git_object: b7b2bff138cee9c130fa01d6157d8b6c21ea5a9c + 
src/mistralai/client/models/chatmoderationrequest.py: + id: 057aecb07275 + last_write_checksum: sha1:7677494c0e36ccbc201384cb587abeb852a1a924 + pristine_git_object: 228e7d26b8b172c3e11f01d4f260bf6e5195b318 + src/mistralai/client/models/checkpoint.py: + id: 1a530d3674d8 + last_write_checksum: sha1:418f08c61b64fa7ffb053c6f5912e211acab1330 + pristine_git_object: c24e433eb4787146620fb48b6d301f51a4db5067 + src/mistralai/client/models/classificationrequest.py: + id: 6942fe3de24a + last_write_checksum: sha1:7bd416d4b0e083efbf9324107263027140702ddb + pristine_git_object: 25b6941355cb9629abb9c0f09fb6fd191c56ffa6 + src/mistralai/client/models/classificationresponse.py: + id: eaf279db1109 + last_write_checksum: sha1:64522aa2b0970e86a0133348411592f95163f374 + pristine_git_object: d2f09f430c4bca39ea9e5423b7d604ea4016fc70 + src/mistralai/client/models/classificationtargetresult.py: + id: 2445f12b2a57 + last_write_checksum: sha1:2b8b9aeadee3b8ffe21efd1e0c842f9094c4ecc7 + pristine_git_object: 6c7d6231d211977332100112900ea0f8cdf5d84c + src/mistralai/client/models/classifierfinetunedmodel.py: + id: 5a9a7a0153c8 + last_write_checksum: sha1:853bf1b3b941ec3aebeb17ac2caf38fa0dd094de + pristine_git_object: fbcf5892d7f0a3ed8b3872d71dd95ed3a25463d1 + src/mistralai/client/models/classifierfinetuningjob.py: + id: a244d5f2afc5 + last_write_checksum: sha1:ceb13935702275025284bb77aa8bf5ccf926e19c + pristine_git_object: fb160cf8e16a1b4899f8bb2803b18ba1f55232ce + src/mistralai/client/models/classifierfinetuningjobdetails.py: + id: 75c5dee8df2e + last_write_checksum: sha1:6b3f2f7ca3bd4e089591f5f9c59b7e28a00447f8 + pristine_git_object: 5d73f55ee0f1321fdeeb4db1971e144953e8e27f + src/mistralai/client/models/classifiertarget.py: + id: 2177d51d9dcf + last_write_checksum: sha1:c801dacc31e2d7682285a9a41d8ef38fa2e38fb9 + pristine_git_object: 4d66d789a42a0bc8762998161f1ad801bd8d96d4 + src/mistralai/client/models/classifiertargetresult.py: + id: 19c343844888 + last_write_checksum: 
sha1:3f5b37de3585cb38a3e41f0ee49dc4b5a33bf925 + pristine_git_object: 8ce7c0ca167b38ebaf1e5fc6393ab56d9f142cfa + src/mistralai/client/models/classifiertrainingparameters.py: + id: 4000b05e3b8d + last_write_checksum: sha1:d7ce2f1017463c52856b973d696c9abecf5f79e3 + pristine_git_object: 14fa4926f8b5b62aa6b5d8864c40d5acf66e7b15 + src/mistralai/client/models/codeinterpretertool.py: + id: 950cd8f4ad49 + last_write_checksum: sha1:8c3d91805d6c5f5cc9d249216694781faf15ea68 + pristine_git_object: ce14265f6d312c3da52014d2a058b6a730d5c980 + src/mistralai/client/models/completionargs.py: + id: 3db008bcddca + last_write_checksum: sha1:e3d36235610c0546d8a2f2bb0a1db0f953747d88 + pristine_git_object: ab5cf5ff2d4df92d00664803f9274696ae80216d + src/mistralai/client/models/completionargsstop.py: + id: 5f339214501d + last_write_checksum: sha1:744878976d33423327ea257defeff62073dad920 + pristine_git_object: 39c858e66380044e11d3c7fd705334d130f39dea + src/mistralai/client/models/completionchunk.py: + id: d786b44926f4 + last_write_checksum: sha1:15f1b57b696b46bf6986c8f1a53d6bbf8d2351e2 + pristine_git_object: 5fd6c173ef29fb9bf2f570e0c2300268221e1ad3 + src/mistralai/client/models/completionevent.py: + id: c68817e7e190 + last_write_checksum: sha1:dc43ac751e4e9d9006b548e4374a5ec44729eea4 + pristine_git_object: 3b90ab0c1ecac12f90e0ae3946a6b61410247e4f + src/mistralai/client/models/completionfinetunedmodel.py: + id: f08c10d149f5 + last_write_checksum: sha1:5fbd8c5475c250cbed1c2d2f47de372e8e92b128 + pristine_git_object: 54a1c1656aea1954288e9144670c939e29a83c47 + src/mistralai/client/models/completionfinetuningjob.py: + id: c242237efe9b + last_write_checksum: sha1:e4352be2411c7026c054a6fe380b87242183d4e4 + pristine_git_object: 1bf0a730c389be30bac2acfa17ffc6b5891e4918 + src/mistralai/client/models/completionfinetuningjobdetails.py: + id: e8379265af48 + last_write_checksum: sha1:b11c9bdc161da6a5cbd9f35f4bc5b51f0f3cea9c + pristine_git_object: cb7870219b261e260feceb6109088b0bbf8a6408 + 
src/mistralai/client/models/completionresponsestreamchoice.py: + id: 5969a6bc07f3 + last_write_checksum: sha1:59730cdaeeb3e95f4d38f63c34a4e491f40e6010 + pristine_git_object: a52ae892fcaafe54918160d055ee2badac31404e + src/mistralai/client/models/completiontrainingparameters.py: + id: be202ea0d5a6 + last_write_checksum: sha1:1a797019770795edcd911ff5b3580bedb83c05f4 + pristine_git_object: ca50a7ad521b46f275dd3a39c98911f13ee527c8 + src/mistralai/client/models/contentchunk.py: + id: c007f5ee0325 + last_write_checksum: sha1:b921b03b4c1e300b0e3f51ea9eadd4d7c4b7a0ea + pristine_git_object: e3de7591a089a3739af17108cecdc2d4240f10bf + src/mistralai/client/models/conversationappendrequest.py: + id: 81ce529e0865 + last_write_checksum: sha1:bdae860241893ec3ab3f22bd57c45dede2927da3 + pristine_git_object: 386714fd6dcccff8abb2247d7474949d9e8e79f8 + src/mistralai/client/models/conversationappendstreamrequest.py: + id: 27ada745e6ad + last_write_checksum: sha1:0a563cb146c4806ee6a133d10e7af8839e6f38dd + pristine_git_object: 32f6b148c647d3bac8edada3b941c51c17d78901 + src/mistralai/client/models/conversationevents.py: + id: 8c8b08d853f6 + last_write_checksum: sha1:2eedde1ecf31061fb13de0b1bdc9ea311897b570 + pristine_git_object: 17812983f3aee3e675d44f46ca1b741315c2139a + src/mistralai/client/models/conversationhistory.py: + id: 60a51ff1682b + last_write_checksum: sha1:8984a0b12766e350022796a44baf6aac4c93f79b + pristine_git_object: ceef115b70ff02da05ac97571a177edf5b5f6cf6 + src/mistralai/client/models/conversationinputs.py: + id: 711b769f2c40 + last_write_checksum: sha1:5fc688af61d6a49ede9c9709069f3db79f4dc615 + pristine_git_object: 7ce3ffc3772926a259d714b13bfc4ee4e518f8f7 + src/mistralai/client/models/conversationmessages.py: + id: 011c39501c26 + last_write_checksum: sha1:95e3abe55199f2118e6fb7e5d8520af6a929449a + pristine_git_object: 84664b62337dcdc408bb01e0494fa598e6a86832 + src/mistralai/client/models/conversationrequest.py: + id: 58e3ae67f149 + last_write_checksum: 
sha1:f7a67082e06c1789f4c6a4c56bfef5f21cce5034 + pristine_git_object: 83d599ebf984f1df2390d97dbe651881f7dee0e2 + src/mistralai/client/models/conversationresponse.py: + id: ad7a8472c7bf + last_write_checksum: sha1:99148d75abcb18c91ba0a801174461346508f5fb + pristine_git_object: f6c10969a931eaf1a4667b0fcff3765f57658b15 + src/mistralai/client/models/conversationrestartrequest.py: + id: 681d90d50514 + last_write_checksum: sha1:99123cee7c54f44c02b56111305af399143b4e5a + pristine_git_object: 7ae16aff4de36a91093d3021b66283e657b00897 + src/mistralai/client/models/conversationrestartstreamrequest.py: + id: 521c2b5bfb2b + last_write_checksum: sha1:abfd14652b4785c36de84a59593b55f7a6a2d613 + pristine_git_object: 0e247261d997ac3d8ff0155ba54cc4cafe9ac65a + src/mistralai/client/models/conversationstreamrequest.py: + id: 58d633507527 + last_write_checksum: sha1:7dc25a12979f4082ed7d7e37584bb9c30297f196 + pristine_git_object: a20dccae1a60753ed95f59da0df78c204c19d515 + src/mistralai/client/models/conversationthinkchunk.py: + id: 77e59cde5c0f + last_write_checksum: sha1:5db067661a5d4b0c13db92ad93da1aab9e0e7a34 + pristine_git_object: e0e172e3edbe46c000e82e712c135b96a65312e9 + src/mistralai/client/models/conversationusageinfo.py: + id: 6685e3b50b50 + last_write_checksum: sha1:3e0489836936a7a77fa3b41adde1eb459ecd176d + pristine_git_object: 1e80f89ee4f7a3d464df2bf39990b467029e86c1 + src/mistralai/client/models/createagentrequest.py: + id: 442629bd914b + last_write_checksum: sha1:273dde9338cc1eb166ee40f4c6215f90cae908ab + pristine_git_object: 54b09880eefe348d2e003ed1b238b67cb58b8e34 + src/mistralai/client/models/createbatchjobrequest.py: + id: 56e24cd24e98 + last_write_checksum: sha1:e648017622cd6e860cb15e5dd2b29bf9f2a00572 + pristine_git_object: 9a901fefee0ea6a825274af6fd0aa5775a61c521 + src/mistralai/client/models/createfileresponse.py: + id: fea5e4832dcc + last_write_checksum: sha1:b7f3ba95a09a3225eae80b53152fe2b7d3806fbe + pristine_git_object: 768212803bc3535ac8a27a9c0d48f147e3d536b7 + 
src/mistralai/client/models/createfinetuningjobrequest.py: + id: c60d2a45d66b + last_write_checksum: sha1:2e8e608140860bba9ecfa9498d61cf807f96680a + pristine_git_object: e328d944ce2a71ffbec027965d31075070647dbc + src/mistralai/client/models/createlibraryrequest.py: + id: 1c489bec2f53 + last_write_checksum: sha1:45fa65be82712ce99304027c88f953f0932bdae4 + pristine_git_object: 58874e014275b06ce19d145aaa34a48d11ca0950 + src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py: + id: 767aba526e43 + last_write_checksum: sha1:73568f2f450bf9c23aca3649372a92e1b9a2fc54 + pristine_git_object: 199614f53501f34088cb112d6fe1114e1e588d8a + src/mistralai/client/models/deletefileresponse.py: + id: 3ee464763a32 + last_write_checksum: sha1:2c0df66fc8c4384d50e54ac03577da3da2997cf5 + pristine_git_object: ffd0e0d015e38e5f6113da036ebeba98441444f4 + src/mistralai/client/models/deletemodelout.py: + id: ef6a1671c739 + last_write_checksum: sha1:d67ac7c3fa143be40c74455c7206c94bfb5a2134 + pristine_git_object: fa0c20a419c59b8fc168c150b28d703398ea7f40 + src/mistralai/client/models/deltamessage.py: + id: 68f53d67a140 + last_write_checksum: sha1:b18350de03a8685bea5ac52e1441415b5e58bdf4 + pristine_git_object: d9fa230e93d4e0886f21c836cf3813855eb8f9fd + src/mistralai/client/models/document.py: + id: fbbf7428328c + last_write_checksum: sha1:db2e184f7cc97b24e7ec80887c35c32222afd8a8 + pristine_git_object: fcc5bca541a814143b0489528fd20a69004104b1 + src/mistralai/client/models/documentlibrarytool.py: + id: 3eb3c218f457 + last_write_checksum: sha1:d03a6136192b56778bd739d834d9bdc80a09cc23 + pristine_git_object: 642c3202b11c5bb8a2b41cf8ae0fe43f73aa2a81 + src/mistralai/client/models/documenttextcontent.py: + id: e730005e44cb + last_write_checksum: sha1:c86f4b15e8fda1cd5c173da01462342cd22b7286 + pristine_git_object: b6904cb4267347b62a457a01b91a391500326da9 + src/mistralai/client/models/documenturlchunk.py: + id: 4309807f6048 + last_write_checksum: sha1:33cdaccb3a4f231730c7fa1db9f338a71e6311b2 + 
pristine_git_object: 43444d98b8b7fb430f9c33562c35072d9c79a263 + src/mistralai/client/models/embeddingdtype.py: + id: 77f9526a78df + last_write_checksum: sha1:a4e2ce6d00e6d1db287a5d9f4254b0947227f337 + pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb + src/mistralai/client/models/embeddingrequest.py: + id: eadbe3f9040c + last_write_checksum: sha1:3ef4e321a698c4a10389280f8b1c7d0da20f4faf + pristine_git_object: 5fa2d2f63796523e58e281f2d3bcc05a92111842 + src/mistralai/client/models/embeddingresponse.py: + id: f7d790e84b65 + last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 + pristine_git_object: 6ffd68941f32f396998df9dded14ff8365926608 + src/mistralai/client/models/embeddingresponsedata.py: + id: 6d6ead6f3803 + last_write_checksum: sha1:ba5f38ee6e2b0436532229da01ba79ee49c20d12 + pristine_git_object: 098cfae06eae6a92830b4b5a26985f5d5950e512 + src/mistralai/client/models/encodingformat.py: + id: b51ec296cc92 + last_write_checksum: sha1:ea907f86b00323d99df37f7ff45d582aace798e7 + pristine_git_object: 4a39d0295f9069ae9f749cf21dab450eaf145d19 + src/mistralai/client/models/entitytype.py: + id: 62d6a6a13288 + last_write_checksum: sha1:015e2db9e8e5a3e4ce58442ccedaf86c66239dde + pristine_git_object: 56d82cbed237f32a8b00cfee4042dfe3e7053bcb + src/mistralai/client/models/event.py: + id: e5a68ac2dd57 + last_write_checksum: sha1:8ed848fe2e74c7f18ee8f4dcba39ad1c951c16d2 + pristine_git_object: c40ae2b1a1b8131a90c637e3268872b97b22683e + src/mistralai/client/models/file.py: + id: f972c39edfcf + last_write_checksum: sha1:609381a40a4bfdda2e7e750a848cd2bb38d6ac0f + pristine_git_object: 1b0ea1d4a288d9723dcdd7cfda99d49c5cbd9e7c + src/mistralai/client/models/filechunk.py: + id: ff3c2d33ab1e + last_write_checksum: sha1:d7561c39252b81007a8e079edb4f23989ffd510e + pristine_git_object: 5c8d2646dc0d5c732828bdd81c5a58e12fa92a42 + src/mistralai/client/models/filepurpose.py: + id: a11e7f9f2d45 + last_write_checksum: sha1:8b167c02f9f33e32d5fd1c6de894693924f4d940 + 
pristine_git_object: 49a5568ff82ad4a85e15c8de911e8d6c98dcd396 + src/mistralai/client/models/files_api_routes_delete_fileop.py: + id: 2f385cc6138f + last_write_checksum: sha1:ccfd3ff64635cfd511f49c5e02a6f1860c479966 + pristine_git_object: eaba274b9dd94d6cf729325316b3e3e9b3834566 + src/mistralai/client/models/files_api_routes_download_fileop.py: + id: 8184ee3577c3 + last_write_checksum: sha1:81058ede2a5eb333b54561f99ed7878082c0f411 + pristine_git_object: 83de8e73a3d50917e4a41bb92a828a10e646a632 + src/mistralai/client/models/files_api_routes_get_signed_urlop.py: + id: 0a1a18c6431e + last_write_checksum: sha1:ef4908b9d2e43c0256d25a5aa533c5bdc1205113 + pristine_git_object: 64cd6ac57b4f2de70403e11062307a8d8d5d94e7 + src/mistralai/client/models/files_api_routes_list_filesop.py: + id: b2e92f2a29b4 + last_write_checksum: sha1:71e67fc63f0df28c534d4bd03a6464ae88959dc2 + pristine_git_object: b03e2f886ce02d4beabca150302a924ae63ad507 + src/mistralai/client/models/files_api_routes_retrieve_fileop.py: + id: 5d5dbb8d5f7a + last_write_checksum: sha1:d451d8d2b32f412158a074919cca1a72f79940cb + pristine_git_object: 5f8de05f1bba07517dc2ee33a4f05122503b54b5 + src/mistralai/client/models/files_api_routes_upload_fileop.py: + id: f13b84de6fa7 + last_write_checksum: sha1:d38a86b9e7d338278e14c68756654d85bc330070 + pristine_git_object: 54ff4e4951a58e13993be0f5d2c16b0cb11c0978 + src/mistralai/client/models/fileschema.py: + id: 19cde41ca32a + last_write_checksum: sha1:0b3acb889a2c70998da4076e2f4eef3698e8b117 + pristine_git_object: e99066a9eb19daebcf29f356225635a297c444e1 + src/mistralai/client/models/fimcompletionrequest.py: + id: cf3558adc3ab + last_write_checksum: sha1:20bca1f6a0ab6e84f48b6e332f0c3242da84ae45 + pristine_git_object: ea877213d1abe4811fee188eb7a60ccf1bb51f18 + src/mistralai/client/models/fimcompletionresponse.py: + id: b860d2ba771e + last_write_checksum: sha1:dffd5a7005999340f57eaa94e17b2c82ddc7fd90 + pristine_git_object: 1345a116b7855ab4b824cf0369c0a5281e44ea97 + 
src/mistralai/client/models/fimcompletionstreamrequest.py: + id: 1d1ee09f1913 + last_write_checksum: sha1:aa8313ecdd852034aaf6ec23dc3f04f7ef8e28e5 + pristine_git_object: e80efc095feb2e2df87f6d3c3f9c56b6cbf347b3 + src/mistralai/client/models/finetuneablemodeltype.py: + id: 05e097395df3 + last_write_checksum: sha1:daf4cd1869da582981023dea1074268da071e16a + pristine_git_object: 7b924bd7abc596f0607a513eee30e98cbf7ab57a + src/mistralai/client/models/finetunedmodelcapabilities.py: + id: 475c805eab95 + last_write_checksum: sha1:5919e48a6778f1a2360ce090d05b41b1bf33253f + pristine_git_object: 2f4cca0b8c0e3e379f5c2aa67953f2e55757f68d + src/mistralai/client/models/ftclassifierlossfunction.py: + id: d21e2a36ab1f + last_write_checksum: sha1:ca90e2f1cd0b9054293bea304be0867c93f7fac2 + pristine_git_object: ccb0f21b5a69f91119bec9db6e9f3d876e4c35af + src/mistralai/client/models/ftmodelcard.py: + id: c4f15eed2ca2 + last_write_checksum: sha1:b1b36ff994bcadd8c917880333627fd05976c991 + pristine_git_object: 2c26ff2f66faa55dc5a5a1743720e8f3f5d4d0f1 + src/mistralai/client/models/function.py: + id: 32275a9d8fee + last_write_checksum: sha1:ca24a512de22787932d7f4af005699621926d6c0 + pristine_git_object: 1da1dcc9b637d0a5b0fbb7cf2761f6d01eb3068f + src/mistralai/client/models/functioncall.py: + id: 393fca552632 + last_write_checksum: sha1:6e96e9abaa9b7625a9a30e376c31b596ee9defcb + pristine_git_object: 527c3ad408e1e1ccfe6301a8860e7f751e1d312d + src/mistralai/client/models/functioncallentry.py: + id: cd058446c0aa + last_write_checksum: sha1:776f397d17f946bae2929998f14d991a1ccc99e0 + pristine_git_object: d05fad856729a76dd24f8aa4d050f8381e51ed6a + src/mistralai/client/models/functioncallentryarguments.py: + id: 3df3767a7b93 + last_write_checksum: sha1:9858feba8f7f01017f10477a77dec851a1d06e55 + pristine_git_object: afe81b24e131a8ef879ee7f140271aa762b8ed2f + src/mistralai/client/models/functioncallevent.py: + id: 23b120b8f122 + last_write_checksum: sha1:62b5b94df4e5b6f945ead78871cdbfceb6cd40cf + 
pristine_git_object: 849eed76d08524e5e4d1e7cc1c3fa04386f5ef75 + src/mistralai/client/models/functionname.py: + id: 000acafdb0c0 + last_write_checksum: sha1:4145b7b817b712b85dcbedb309416c7ba72d827e + pristine_git_object: 07d98a0e65ccbcba330fb39c7f23e26d3ffc833c + src/mistralai/client/models/functionresultentry.py: + id: 213df39bd5e6 + last_write_checksum: sha1:3aa6834bf2beda061ac772a0a8a4d7ed5ad942a0 + pristine_git_object: 01e2e36fc0a9de6a2b06a4205004992baf0f9e43 + src/mistralai/client/models/functiontool.py: + id: 2e9ef5800117 + last_write_checksum: sha1:bce744d77a3dac92d4776a37be497311674bdc7d + pristine_git_object: eae872643c85115a825c2feda11d9a6c12a06b99 + src/mistralai/client/models/getfileresponse.py: + id: 81919086e371 + last_write_checksum: sha1:fc0232e54c0de355058c5bd82e424953b1659b56 + pristine_git_object: f625c153799dcd38e4990504d48371112b65cd15 + src/mistralai/client/models/getsignedurlresponse.py: + id: cee4e4197372 + last_write_checksum: sha1:ab9adbc06e7f02e791dc549ad1850ce1b1a250a7 + pristine_git_object: 4ba95894f2b89719fa58e7e397c28014dbd00316 + src/mistralai/client/models/githubrepository.py: + id: 4bc83ce18378 + last_write_checksum: sha1:21aa04bc426158ccbe1ded3bc65b46e6869e897d + pristine_git_object: 84b01078c2192de5d6668a6943d416a2ff30db5f + src/mistralai/client/models/githubrepositoryin.py: + id: eef26fbd2876 + last_write_checksum: sha1:18bd07155fff4b99d114353fee95e6bd828aeacd + pristine_git_object: 38bcc2087630f2fd4e9e5fa149449c32e21fdb07 + src/mistralai/client/models/imagedetail.py: + id: c1084b549abb + last_write_checksum: sha1:375db5c8fa87712dc37e46d0bf72283ae6cd6400 + pristine_git_object: 1982d357277a92fc7ebea3b99146116596d99c78 + src/mistralai/client/models/imagegenerationtool.py: + id: e1532275faa0 + last_write_checksum: sha1:88a1347876f69960dc33f8e2cb9929ab1a90a224 + pristine_git_object: c1789b18028156ae683d0323e65e47a43694570f + src/mistralai/client/models/imageurl.py: + id: e4bbf5881fbf + last_write_checksum: 
sha1:28ef2509fdb489ecf379b60e883e6957aebd2797 + pristine_git_object: ac1030f5d61144e393b2aa9f3ffea893faabb1f7 + src/mistralai/client/models/imageurlchunk.py: + id: 746fde62f637 + last_write_checksum: sha1:0ac388d25cae5348ffb3821706c3a8b64e716ff5 + pristine_git_object: 7134b46e7428cee52eda859cb78387c99f7e1f5a + src/mistralai/client/models/inputentries.py: + id: 44727997dacb + last_write_checksum: sha1:9e2a776be59c5043ea4179a60ac082faf064cc3d + pristine_git_object: e2da5a80aea121d18e2232f302ad73f63b4fc050 + src/mistralai/client/models/inputs.py: + id: 84a8007518c7 + last_write_checksum: sha1:d067587b5395529fbd638741f20b80edb2848e39 + pristine_git_object: 9ecd7f484ea306b91a9ebf038a0addd80ccd57c4 + src/mistralai/client/models/instructrequest.py: + id: 6d3ad9f896c7 + last_write_checksum: sha1:b56a77442b50b50151adedaa5ec356dc96c56428 + pristine_git_object: e5f9cccf174d8e73c42e8ee4aa294b43e1ad6cf5 + src/mistralai/client/models/jobmetadata.py: + id: cfbdde7fc0a2 + last_write_checksum: sha1:e1b180a47ca888d0fd4cbc34b62000d3ac86c2b5 + pristine_git_object: f6e96fa104e7a6c8ce9a94538a3d00167a2ae341 + src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: + id: b56cb6c17c95 + last_write_checksum: sha1:21b5794f110c53691654d7195201f9a4b7793f21 + pristine_git_object: de2e63472ac53809cfeae200bd7d2f3dcbb70034 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: + id: 36b5a6b3ceee + last_write_checksum: sha1:b41862f037d74bbdc44fb4df5f65cd402a16703b + pristine_git_object: d779e1d96c359b0d548d5dee17c06ae2a505cf47 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: + id: d8f0af99c94d + last_write_checksum: sha1:a50885f97cfd4d38bc3e3b0746c88bd602b88f94 + pristine_git_object: 89ac3c933347497b6fb1ec26fecb485802ef85fc + src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: + id: 34f89d2af0ec + last_write_checksum: sha1:3d5242f757ee9be10963af9cd5d47824fc83c71a + pristine_git_object: 
9fa99837dda7e9413d3a05822cd17107c5fae51d + src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: + id: d175c6e32ecb + last_write_checksum: sha1:515b7737cf8262243ee6175e297714125f3962bc + pristine_git_object: 56fa534044522f27fb26ef4820d10f22752134ea + src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: + id: 81651291187a + last_write_checksum: sha1:19a0707e2f73b0184959d7c710a170650fa1767a + pristine_git_object: db857f7d6cc77057491e4b968798f730228b09bc + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: + id: d910fd8fe2d6 + last_write_checksum: sha1:52704f01d7388a8b62d59b6f7cd94fcb7d067ebf + pristine_git_object: ddd9c1891356a7c272e0244a9aea3d3d6b2d00d6 + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: + id: cf43028824bf + last_write_checksum: sha1:36082bde6f3d932c66178729533e2a69040fdeab + pristine_git_object: ec80a158f45061b122f84ebaff89ae82ef8d98ef + src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: + id: e7ff4a4a4edb + last_write_checksum: sha1:8cbfc309c09df806ad7d130004b4e1c2b89ede0a + pristine_git_object: cd25fa04f29dd544f01f3620b31d1c54c86addbb + src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: + id: 7cc1c80335a9 + last_write_checksum: sha1:f66c16423155066b844f8e89446d2acbb6e68157 + pristine_git_object: fd01fe6948613b0fffef9ac76cf1a0f9011ec5af + src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: + id: 6d9dc624aafd + last_write_checksum: sha1:fbacb171b9c75f1fe45406f542a958d10c15fae2 + pristine_git_object: 296070b426900305fe4596f03a3c9f081cdb2dcf + src/mistralai/client/models/jsonschema.py: + id: e1fc1d8a434a + last_write_checksum: sha1:d01507ab0a1f6067cbc65aaba199de340ccc68aa + pristine_git_object: dfababa694305c96f98ddebf2f09e448e737c855 + src/mistralai/client/models/legacyjobmetadata.py: + id: 0330b8930f65 + 
last_write_checksum: sha1:3c2f669a05cc01227f62d6a8da1840d9c458d52f + pristine_git_object: 5757675895b3c56d8aa7c174deb08567e596ecf8 + src/mistralai/client/models/libraries_delete_v1op.py: + id: b2e8bbd19baa + last_write_checksum: sha1:ba41496bc99040f7598659c5b037b955b7f6d385 + pristine_git_object: 893ab53b11672edd9cde175e68a80d89ff949cb6 + src/mistralai/client/models/libraries_documents_delete_v1op.py: + id: 81eb34382a3d + last_write_checksum: sha1:66d1c6ec5e2535b0db72a3beac65b25a1f2336d7 + pristine_git_object: 0495832efba33314f3cd28fe62759c6dac5ca706 + src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: + id: a7417ebd6040 + last_write_checksum: sha1:030ca9fb7e10396e6b743ee644fe1a734e1df1f0 + pristine_git_object: 186baaed8346d106272fea2e4826587634b061bc + src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: + id: d4b7b47913ba + last_write_checksum: sha1:fdad7a6d3ae9a9c69009caf8207b284835675a9a + pristine_git_object: ebcf85d77ed6982d510ae95a6971e1d4b3ad56ca + src/mistralai/client/models/libraries_documents_get_status_v1op.py: + id: f314f73e909c + last_write_checksum: sha1:11d463eb328a1133658e8ff92340edc7f75923e4 + pristine_git_object: 1f4847874cdeff26caaf5fd16e0f8382834ecb2b + src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: + id: 1ca4e0c41321 + last_write_checksum: sha1:26133a83bf0ef063c78069da1bbb96d58f44f30c + pristine_git_object: e0508d66fce682ed20a029604897137940689327 + src/mistralai/client/models/libraries_documents_get_v1op.py: + id: 26ff35f0c69d + last_write_checksum: sha1:e87e56e8fb9f7c11d61c805362db4755a81186b9 + pristine_git_object: 857dfbe60c57af8b0fa6655a049ed336d70fb941 + src/mistralai/client/models/libraries_documents_list_v1op.py: + id: 756f26de3cbe + last_write_checksum: sha1:5a1a9e025dc7a1fedaa5199d396a73c4986d4113 + pristine_git_object: da7d793b65139a3894b077a9665b392e8a44e8a2 + src/mistralai/client/models/libraries_documents_reprocess_v1op.py: + id: dbbeb02fc336 + 
last_write_checksum: sha1:bd5013cb1581dd13642ce7edf1e701f5b0c967c4 + pristine_git_object: a2f9ba2a0465fb3a8eb3b9afbb41d813de348656 + src/mistralai/client/models/libraries_documents_update_v1op.py: + id: 734ba6c19f5f + last_write_checksum: sha1:e12ca003680ff17523fe09438cd8f71d00ea081e + pristine_git_object: 7ad4231f72901b675d9af67c33364592c86be5ab + src/mistralai/client/models/libraries_documents_upload_v1op.py: + id: "744466971862" + last_write_checksum: sha1:9691ac41ecf986c9ccfad81423d367f96b10f4b7 + pristine_git_object: 388633d1c7e906803b711ef2bbf37656624515a9 + src/mistralai/client/models/libraries_get_v1op.py: + id: d493f39e7ebb + last_write_checksum: sha1:25b3c2c1040cd73ebd6b988b8b27708831affefd + pristine_git_object: 7a51d6053aa2cf2e6524a80487fe9549eec3dfa1 + src/mistralai/client/models/libraries_share_create_v1op.py: + id: feaacfd46dd3 + last_write_checksum: sha1:72e07fb60edbe1989865ba2ac90349edeb183f7e + pristine_git_object: 00ea74824b2efc4150d2e547e2eee416e5f6f2ee + src/mistralai/client/models/libraries_share_delete_v1op.py: + id: 7f3a679ca384 + last_write_checksum: sha1:897857c11cf0c14a0a81ef122dec4395dc16c0ce + pristine_git_object: eca3f86a6135e702f8cb6412a5f215dac2335a8f + src/mistralai/client/models/libraries_share_list_v1op.py: + id: 8f0af379bf1c + last_write_checksum: sha1:d27e0360c504576c315350fc226d371da455a598 + pristine_git_object: 895a259059283a17cc7558e3cc03022e2d4dd259 + src/mistralai/client/models/libraries_update_v1op.py: + id: 92c8d4132252 + last_write_checksum: sha1:a252f68e65cdb47e27d7059f256381daf2847344 + pristine_git_object: 54b0ab708c665ccb841b1c8d0f2748c390850506 + src/mistralai/client/models/library.py: + id: 028a34b08f9c + last_write_checksum: sha1:65f02f963a0540385681b88c7c7fba98d0d704f4 + pristine_git_object: 1953b6fbc6d7ad245ccacd9d665fb29853b00af7 + src/mistralai/client/models/listbatchjobsresponse.py: + id: 99d94c86a871 + last_write_checksum: sha1:7530be5f80a0756527be94758e800e8118e53210 + pristine_git_object: 
35a348a1160dcf6d82d58c70cea07e11730359fb + src/mistralai/client/models/listdocumentsresponse.py: + id: f593d8e66833 + last_write_checksum: sha1:0d842168856056ff681b2a1c36b87df8e0d96570 + pristine_git_object: c48b8c051ad0d1fb4aed8396697e57e782be5a40 + src/mistralai/client/models/listfilesresponse.py: + id: 85d6d24c1a19 + last_write_checksum: sha1:caf901685bfb6f13d707b89726aaf6e5116cd054 + pristine_git_object: 10a60126600343033a4b0511d717cac6f1924b4d + src/mistralai/client/models/listfinetuningjobsresponse.py: + id: 118e05dbfbbd + last_write_checksum: sha1:f0582740a6777039e9695d97f072b5a3c34b483e + pristine_git_object: 1e434c5986bf577e2b42cca943cc6896a83d1fa2 + src/mistralai/client/models/listlibrariesresponse.py: + id: df556a618365 + last_write_checksum: sha1:55afb46b1fa797bc46574e5256cd063574c6fcbf + pristine_git_object: 337fe105731d8f3ced1f8f1299ff4081b9d5bfbe + src/mistralai/client/models/listsharingout.py: + id: ee708a7ccdad + last_write_checksum: sha1:18e6501b00a566121dfd6a1ce7b0e23fef297e45 + pristine_git_object: 443ad0d6a275c1c8bae4adda3e67621b068c0412 + src/mistralai/client/models/messageentries.py: + id: e13f9009902b + last_write_checksum: sha1:43aebdc9eaecc8341298dc6b281d0d57edf4e9e6 + pristine_git_object: a95098e01843fe3b4087319881967dc42c6e4fef + src/mistralai/client/models/messageinputcontentchunks.py: + id: 01025c12866a + last_write_checksum: sha1:6a0988d4e52aa2e9f7b09ae1e3266ecf9639c22b + pristine_git_object: 1e04ce24d62db6667129b35eb28dabcfd4135ea8 + src/mistralai/client/models/messageinputentry.py: + id: c0a4b5179095 + last_write_checksum: sha1:b5bad18b88c0bfbbddfdafa6dc50a09e40a6ebd7 + pristine_git_object: c948a13e3cc2071dd1b3d11c419ea61d51470152 + src/mistralai/client/models/messageoutputcontentchunks.py: + id: 2ed248515035 + last_write_checksum: sha1:dc7456e44084cba9cc6a46553fd64b1eb25f8d77 + pristine_git_object: bf455d17db16e4bc11da0ebb105a9f6ad4d63c01 + src/mistralai/client/models/messageoutputentry.py: + id: a07577d2268d + last_write_checksum: 
sha1:38ad03422407925087835ab888c0be40bf5fa7fa + pristine_git_object: 6a9c52ed59af1497577be2538e7141d57eea4c8f + src/mistralai/client/models/messageoutputevent.py: + id: a2bbf63615c6 + last_write_checksum: sha1:c3317ab9279c499dd7fb26f45799ca9369676ac7 + pristine_git_object: d765f4fd3c4e43c37063833368e4b21cc0bfbcf2 + src/mistralai/client/models/metric.py: + id: c6a65acdd1a2 + last_write_checksum: sha1:5ef7c75b278f16b412b42889ff0f2fc19d87cb7d + pristine_git_object: 1413f589f7f23991a12c1367bc6f287b5e07d4a4 + src/mistralai/client/models/mistralpromptmode.py: + id: 95abc4ec799a + last_write_checksum: sha1:a1417b987bb34daeb73ca4e015c085814e6c8ad2 + pristine_git_object: 9b91323e7545d636308064085ca16fc554eac904 + src/mistralai/client/models/modelcapabilities.py: + id: 64d8a422ea29 + last_write_checksum: sha1:0f733a45f06cb2c603b47134d999a2de4c0a7bb0 + pristine_git_object: d9293ccc163995cfe0419d05c90fe1ae8e75cf57 + src/mistralai/client/models/modelconversation.py: + id: fea0a651f888 + last_write_checksum: sha1:4c1b31d95351dea877e24bd452b32d8e22edf42e + pristine_git_object: bb33d2e0e047bc075cb7ae284958b80a5b5ee657 + src/mistralai/client/models/modellist.py: + id: 00693c7eec60 + last_write_checksum: sha1:de62fc6787f482e5df0ff0e70415f493f177b9a1 + pristine_git_object: 5fd835f24cd1098a153ebfb3e958038a183d28a7 + src/mistralai/client/models/moderationobject.py: + id: 132faad0549a + last_write_checksum: sha1:a8c1454a533e466216ef98dd198ae8959f51fa76 + pristine_git_object: e7ccd8f6f1f75704a973be7ebabc49617070c34a + src/mistralai/client/models/moderationresponse.py: + id: 06bab279cb31 + last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567 + pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c + src/mistralai/client/models/ocrimageobject.py: + id: 685faeb41a80 + last_write_checksum: sha1:13f4e4d33d8fb5b0ee842695d4cc8329bd7ca382 + pristine_git_object: 365f062b5674141aad4b1601a85bec7a56db4cff + src/mistralai/client/models/ocrpagedimensions.py: + id: 02f763afbc9f + 
last_write_checksum: sha1:f572ed8992ba1ba4d53b705c4e8c94c85ae1290e + pristine_git_object: 847205c6c74a621dd2ee6d9eb18d1acba8395c50 + src/mistralai/client/models/ocrpageobject.py: + id: 07a099f89487 + last_write_checksum: sha1:5089ac3f02e4225d6c95cc9f05b74013694536da + pristine_git_object: ffc7b3b615e17a8e0d76fea4081249b143d8fe4d + src/mistralai/client/models/ocrrequest.py: + id: 36f204c64074 + last_write_checksum: sha1:9e9009dace9ff36cbff0cb8de408a1e0585147a7 + pristine_git_object: 4ad337ced23b3bdad21785b8dc3fcadbb868d4f0 + src/mistralai/client/models/ocrresponse.py: + id: 2fdfc881ca56 + last_write_checksum: sha1:f1d18dbf4cd02f3598ae574d5033c30989fa6985 + pristine_git_object: e63eed987f4eb83f3406b15cf4d840fd43528a49 + src/mistralai/client/models/ocrtableobject.py: + id: d74dd0d2ddac + last_write_checksum: sha1:492f8e4c30b61330592768b13cffcf9a9eb2c0fa + pristine_git_object: 66bb050f30790c3fc51cdca1b73e847388fe50c5 + src/mistralai/client/models/ocrusageinfo.py: + id: 272b7e1785d5 + last_write_checksum: sha1:2b37766fdff72e7ec6e052f248362f7bb3989d2c + pristine_git_object: 2ec1322b29d7fe5246b9ad355a4997222b37970f + src/mistralai/client/models/outputcontentchunks.py: + id: 9ad9741f4975 + last_write_checksum: sha1:16c43816ac7b7afd134bce1cda5bb44485d9fafe + pristine_git_object: fab7907b105cc9d9c738c5cca9c09eba9d5c4781 + src/mistralai/client/models/paginationinfo.py: + id: 48851e82d67e + last_write_checksum: sha1:166961e2c0f573ba0677ee803820bb944a8a5efb + pristine_git_object: 2b9dab6258249f7be87e1d4a73a2502e21fe1f0d + src/mistralai/client/models/prediction.py: + id: 1cc842a069a5 + last_write_checksum: sha1:3ee24375eb7f00cea0c9db6eebc564ce7067f295 + pristine_git_object: 0c6f4182ca8140e595f601b12fbd582034257587 + src/mistralai/client/models/processingstatusout.py: + id: 3df842c4140f + last_write_checksum: sha1:d5acc98adcfc76cdc4fc26e090ecfc4d7835a438 + pristine_git_object: ed2a4f22dcffe787ce69bb9c6011a95216cf3928 + src/mistralai/client/models/processstatus.py: + id: 
"0205512146e6" + last_write_checksum: sha1:22ad3d5fc80fbf3f83db61512e7bc79295c5fc91 + pristine_git_object: 15bdce2056f2f642096fbbdb2ac32ce1e5b8c9cf + src/mistralai/client/models/realtimetranscriptionerror.py: + id: 8c2267378f48 + last_write_checksum: sha1:78637de61d6fc3bc1fff8e95c0a6f5ffc1a3e111 + pristine_git_object: c661e46100752119521f63045e8ebe79105ecc01 + src/mistralai/client/models/realtimetranscriptionerrordetail.py: + id: 5bd25cdf9c7a + last_write_checksum: sha1:a226b10718b1fe4a661311cbd98ea3b1d1ac4163 + pristine_git_object: cec1f6eabd44ceab4e58694a0862c9c90ea2f264 + src/mistralai/client/models/realtimetranscriptioninputaudioappend.py: + id: 8b03cde6e115 + last_write_checksum: sha1:abcf48a48b077e836e2ae5647d93bd61007b9aa7 + pristine_git_object: 8156a2704bd95b74875f7a9ac17191e026f08993 + src/mistralai/client/models/realtimetranscriptioninputaudioend.py: + id: c187ba1b551d + last_write_checksum: sha1:fa96156774481ca3b98f8c0f99b3b1db01280b37 + pristine_git_object: 473eedb744141faa3447929865a76129d5e96432 + src/mistralai/client/models/realtimetranscriptioninputaudioflush.py: + id: b27b600c310e + last_write_checksum: sha1:8a8eb7de4137cf8cd810d93d984009bf8dff51c4 + pristine_git_object: 553d14c7720b3d1388901989d8160f0e3318ba56 + src/mistralai/client/models/realtimetranscriptionsession.py: + id: 02517fa5411a + last_write_checksum: sha1:eb9a23fb89e0bdb3bb6168f512488a98bd626bc1 + pristine_git_object: a74a457b1e54deb1fcd203ce5ff2c57691f16b18 + src/mistralai/client/models/realtimetranscriptionsessioncreated.py: + id: 4e3731f63a3c + last_write_checksum: sha1:6997848cf22dc90b10597eaf9f0dd966ace969af + pristine_git_object: bb96875ab913f3d6ff241a00d94a87e877637782 + src/mistralai/client/models/realtimetranscriptionsessionupdated.py: + id: 686dc4f2450f + last_write_checksum: sha1:e023fe0c8c54da644fc797c25dfeb070b6f0fd1c + pristine_git_object: fea5db4a1b956cb8253e4f147463c47958bfd989 + src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py: + id: 
4e1b3fd7c5a3 + last_write_checksum: sha1:7da202e016b1d1dfc36a13ac03e3b419f0952cd2 + pristine_git_object: 07ad59a41f8a16b9c23c4e0be503a801ec0e2dd6 + src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py: + id: 7033fdb33ad4 + last_write_checksum: sha1:812f072a9effe1ce44e56094121ed10b3a83e39d + pristine_git_object: a89441e91dff4b7a78e8dd247b43243e89bf129d + src/mistralai/client/models/referencechunk.py: + id: 921acd3a224a + last_write_checksum: sha1:a8bff06a2a040556bce8e6212973a774bee6bd34 + pristine_git_object: e0bbae4e08275e82010080d4ee84612e01a07f81 + src/mistralai/client/models/requestsource.py: + id: 3f2774d9e609 + last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267 + pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2 + src/mistralai/client/models/responsedoneevent.py: + id: cf8a686bf82c + last_write_checksum: sha1:144a8bf407391948946f3f5362db78a33c45ee6c + pristine_git_object: be38fba81c08088303c4342c99ac3628c5957785 + src/mistralai/client/models/responseerrorevent.py: + id: b286d74e8724 + last_write_checksum: sha1:df3f53344624082471c795131552689510946345 + pristine_git_object: fa4d0d01c1cb7f15d6f469279c2000d2fad8f459 + src/mistralai/client/models/responseformat.py: + id: 6ab8bc8d22c0 + last_write_checksum: sha1:0ab455566c6bb0b63e2cb1f61f300266021f5954 + pristine_git_object: b2971412549cc5b95c53b93425dbd5b6503a4df7 + src/mistralai/client/models/responseformats.py: + id: c4462a05fb08 + last_write_checksum: sha1:3cb82d44a4f9df5e9a3f51867be6eab1d439d87a + pristine_git_object: 21345778ad2d41a3746292e67fec628f9ec2a84d + src/mistralai/client/models/responsestartedevent.py: + id: 24f54ee8b0f2 + last_write_checksum: sha1:f66a0a67444916e838ca9a63144fb661832b54b9 + pristine_git_object: 84abfcd9ac159b9bd9234ff015d5525d88d663f6 + src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: + id: 6fefa90ca351 + last_write_checksum: sha1:52775e73fb5c51d245362ce63672cec776e5b6bd + pristine_git_object: 
cd5955c1eadb8cd9d1f9ecc388c2cc17df11c885 + src/mistralai/client/models/sampletype.py: + id: a9309422fed7 + last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7 + pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d + src/mistralai/client/models/security.py: + id: c2ca0e2a36b7 + last_write_checksum: sha1:d74333517caae2a1aa58517e8e935e46913bcc66 + pristine_git_object: f3b3423e850a1afa3b0fa5fa6c94f6018ff70627 + src/mistralai/client/models/shareenum.py: + id: a0e2a7a16bf8 + last_write_checksum: sha1:15a84d57ceeb74cfb37275f714954e42d8e9b3ba + pristine_git_object: 08ffeb7e46fbbc28b7c93ef2aa4a49aff7c0d35e + src/mistralai/client/models/sharingdelete.py: + id: f5ecce372e06 + last_write_checksum: sha1:247d793bd1ddc0ad35d010c17e5b32eba826e3a1 + pristine_git_object: 33ccd7e71b8f65d2a9329d8632b5446ca0431d0a + src/mistralai/client/models/sharingin.py: + id: e953dda09c02 + last_write_checksum: sha1:7c2b5333c634ed7889fc907edbf89c6066db5928 + pristine_git_object: 7c1a52b049db4afbd6a06b5f39966dbec4f862ba + src/mistralai/client/models/sharingout.py: + id: 0b8804effb5c + last_write_checksum: sha1:a78e4f6bf2f49ae8250787e1680b5004563b32ac + pristine_git_object: ab3679a4cbcc2826ff2672a09e4eaf4990b5c6a9 + src/mistralai/client/models/source.py: + id: fcee60a4ea0d + last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0 + pristine_git_object: fcea403cdbad44299fb2178f07a63bb7e83dc033 + src/mistralai/client/models/ssetypes.py: + id: 1733e4765106 + last_write_checksum: sha1:3c79fc7c43cd018fba4950ba013ed15899b82ebf + pristine_git_object: 0add960bc93f53df5ddda94892543a0857f32dd6 + src/mistralai/client/models/systemmessage.py: + id: 500ef6e85ba1 + last_write_checksum: sha1:a88de3fc70adab47943f867336659b3a1a6cdae0 + pristine_git_object: 2602cd2db03cd129b42b343f2dc79ce68106ac35 + src/mistralai/client/models/systemmessagecontentchunks.py: + id: 297e8905d5af + last_write_checksum: sha1:e5695ca0ebdb0f02f3a0c527015df154a0c52b7f + pristine_git_object: 
d480a219e935aaea91adc320de0003b562c0bbb5 + src/mistralai/client/models/textchunk.py: + id: 9c96fb86a9ab + last_write_checksum: sha1:89cbb66753d7a3585ce58c70219a349f770909cc + pristine_git_object: ac9f3137dddc15e1cd10aa6385b76510e6c23e33 + src/mistralai/client/models/thinkchunk.py: + id: 294bfce193a4 + last_write_checksum: sha1:9126c530e93ae7532235d4bfa3e2b202423a0f24 + pristine_git_object: 5995e6010bfb63d0ab2ded6e0f55b7dca23f769a + src/mistralai/client/models/timestampgranularity.py: + id: 68ddf8d702ea + last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f + pristine_git_object: 8d3773752444db865c0e2629ad9eed66eb7f2bc6 + src/mistralai/client/models/tool.py: + id: 48b4f6f50fe9 + last_write_checksum: sha1:7e33d7a0349e652b40926f6a51240b9a5c1a7dbd + pristine_git_object: 2b9965e571eeb494f8cf867818aab488198ecdb2 + src/mistralai/client/models/toolcall.py: + id: fb34a1a3f3c2 + last_write_checksum: sha1:7d0275444dd6be291c091e908a2b7f2fc536f20f + pristine_git_object: 181cec33c904535c804de06c7357bd493647cd70 + src/mistralai/client/models/toolcallconfirmation.py: + id: f2e953cfb4fe + last_write_checksum: sha1:554a2e073917ffb479efe5887c0b59a2f4967c6e + pristine_git_object: fd6eca50a7ec2f4cca2ae20958717881660e0ac5 + src/mistralai/client/models/toolchoice.py: + id: 14f7e4cc35b6 + last_write_checksum: sha1:a787827a4f4ecf5b6a7068ba94fd1ff074898b51 + pristine_git_object: cb787df1b62190319c6e9679521228af28ee7204 + src/mistralai/client/models/toolchoiceenum.py: + id: c7798801f860 + last_write_checksum: sha1:5388b2a6fad842f8e4ae79e6257b4d14c122a6ff + pristine_git_object: d66c3d07058eb87bcc3eec10de99a616b5f6638a + src/mistralai/client/models/toolconfiguration.py: + id: faec24b75066 + last_write_checksum: sha1:912c1c10e88053ae4ee44af763c9ab7c95339f5d + pristine_git_object: b903c8b6c13777b671faf5aa97994117734b3a8f + src/mistralai/client/models/toolexecutiondeltaevent.py: + id: df8f17cf3e07 + last_write_checksum: sha1:2537a6e2dffde3760a064fdf92efa6cdc117ba2b + 
pristine_git_object: 5a977ca6fc5bfdeadd929f18037fb5c9a9582b40 + src/mistralai/client/models/toolexecutiondoneevent.py: + id: 514fdee7d99f + last_write_checksum: sha1:d62f57105e4816e03030bc9a2a5645482ea80c55 + pristine_git_object: 1c9b0ec92d87a8559ef050a21ba309e05f6b0314 + src/mistralai/client/models/toolexecutionentry.py: + id: 76db69eebe41 + last_write_checksum: sha1:9a697fdad4178b95d7d1bd1eaee77ef948fb2d4f + pristine_git_object: 0d6f2a1305f262519ba719969c6e62ceb95e52b3 + src/mistralai/client/models/toolexecutionstartedevent.py: + id: 40fadb8e49a1 + last_write_checksum: sha1:9f6e43d5b2c807ca3b080ea7bd4878ba3ec2a788 + pristine_git_object: 21e5bfa8fea7fa27b7031b740f72a873760700cc + src/mistralai/client/models/toolfilechunk.py: + id: 26c8aadf416a + last_write_checksum: sha1:89bb203aa600bf6a516fbe10e1787a132de9ca5a + pristine_git_object: 0708b3ff4c4f97a0e4c4359baeedc89ef0b10278 + src/mistralai/client/models/toolmessage.py: + id: 15f1af161031 + last_write_checksum: sha1:cfa16352cf5bbcd6eedbfbf7f3002149fd989418 + pristine_git_object: 05a0ee636a4393e3ce65cc1b6e272ddf8ec79254 + src/mistralai/client/models/toolreferencechunk.py: + id: 822e9f3e70de + last_write_checksum: sha1:f5c9265e27fa2d4526e5ce50dff7f7bd641eb642 + pristine_git_object: 95454fe891dd3955121565431897c1b8f0c25083 + src/mistralai/client/models/tooltypes.py: + id: 86c3b54272fd + last_write_checksum: sha1:e90c15c1e645a5f207af0c7ac728cb0a521c6706 + pristine_git_object: e601c1967c42ef8d0c2eea98bc5c0ca722cde066 + src/mistralai/client/models/trainingfile.py: + id: 2edf9bce227d + last_write_checksum: sha1:8fd6a2560554b3c2166daff2ff1a48bb49053489 + pristine_git_object: 2faeda8bfb38c810c5d80eb17cc9928c49c7caf5 + src/mistralai/client/models/transcriptionresponse.py: + id: 60896dbc6345 + last_write_checksum: sha1:e8a318798dfe4ebd64c9d64f487f7e3e8dd05532 + pristine_git_object: 70315463ff8e01c680aa80d68bdc32a7429ddb16 + src/mistralai/client/models/transcriptionsegmentchunk.py: + id: d1e6f3bdc74b + last_write_checksum: 
sha1:ee56c437444cbfa7983ba950e3e166f392d208cb + pristine_git_object: b87bfc2f9de0a07d62e8cc1fe265a9c29f56f194 + src/mistralai/client/models/transcriptionstreamdone.py: + id: 066a9158ed09 + last_write_checksum: sha1:cb8ea2e34c712ef1694bd1b6a83e7eed9318b13b + pristine_git_object: e3c5016901a2400c222e5b821b5afb312af1a1e6 + src/mistralai/client/models/transcriptionstreamevents.py: + id: b50b3d74f16f + last_write_checksum: sha1:68f82eea8a0bcf1b8b65cedf9e276f34121d398b + pristine_git_object: 073fd99aebf6f90027a45c8ee4daa7ffeb8ee34e + src/mistralai/client/models/transcriptionstreameventtypes.py: + id: 6f71f6fbf4c5 + last_write_checksum: sha1:1d568460b1521f17dd5e551632ae4d7883a98dd3 + pristine_git_object: c74bbb7483cc3981ee3638c80c15924f3e1c20c4 + src/mistralai/client/models/transcriptionstreamlanguage.py: + id: e94333e4bc27 + last_write_checksum: sha1:d1ee93b09ca377bc29845924d53db3ccf250269d + pristine_git_object: b6c6190684eccdc3fe6ce4bc7b86f5ee6490a197 + src/mistralai/client/models/transcriptionstreamsegmentdelta.py: + id: c0a882ce57e5 + last_write_checksum: sha1:3507a0355027136e92ada0c9766277381d5dee96 + pristine_git_object: 32ef8f9b2aa34253ea10c830ae856a931306f658 + src/mistralai/client/models/transcriptionstreamtextdelta.py: + id: 6086dc081147 + last_write_checksum: sha1:968b4bc32731be6c63be3fd90eb26f4357f891a3 + pristine_git_object: 42f0ffb7f16bee4f68f9db9807aa4ec3d9ae5176 + src/mistralai/client/models/unarchivemodelresponse.py: + id: 22e2ccbb0c80 + last_write_checksum: sha1:a69d8dc8636f3326eb61892b85a9b60044b457fe + pristine_git_object: 5c75d30edaade853f085533da0f9f5de221b6e44 + src/mistralai/client/models/updateagentrequest.py: + id: 914b4b2be67a + last_write_checksum: sha1:f37178288254e905ce298befbe801fa6ba63ec0e + pristine_git_object: b751ff74396ca0e74411a7a1549c6e0b4988fc49 + src/mistralai/client/models/updatedocumentrequest.py: + id: a8cfda07d337 + last_write_checksum: sha1:c644725ae379f22550d00b42baefb511d1cc3667 + pristine_git_object: 
61e696555c0654208b0d9dcd63fc475ad85297d4 + src/mistralai/client/models/updatelibraryrequest.py: + id: 51bc63885337 + last_write_checksum: sha1:622d6a7af58d2e86d7d2dd4e312883d11ce5a8a8 + pristine_git_object: 91cbf2a1c76361c9c5ee1554c80f1507ff5ee50b + src/mistralai/client/models/updatemodelrequest.py: + id: fe649967751e + last_write_checksum: sha1:dbba8a6ccbfae36ac56808742f4c05ab99dd2c6c + pristine_git_object: f685cfcce1aa3669159fec902ba78034ef3141b8 + src/mistralai/client/models/usageinfo.py: + id: 54adb9a3af16 + last_write_checksum: sha1:04705526057c43495284fe9c50cf7df2af7b49fd + pristine_git_object: 31cbf07e3e38df4452da320e44f3fa9aef17c196 + src/mistralai/client/models/usermessage.py: + id: cb583483acf4 + last_write_checksum: sha1:0060ee5f5fbbd78073cd56546127a021354a8072 + pristine_git_object: 63e7679246a11fe8e7a3db06e382779c05c64366 + src/mistralai/client/models/validationerror.py: + id: 15df3c7368ab + last_write_checksum: sha1:63df5739d68f984470d4d1b8661a875201cc301d + pristine_git_object: 385714c8cb80a8afbca6d5142a2d378d0d165cf9 + src/mistralai/client/models/wandbintegration.py: + id: 4823c1e80942 + last_write_checksum: sha1:cc0a7ce49756928f4d261375526a3498b9e4f05d + pristine_git_object: f0df2c77845b2741802730fcd4f3c5d31b7ddd8e + src/mistralai/client/models/wandbintegrationresult.py: + id: 8787b4ad5458 + last_write_checksum: sha1:6ba506e01333a3084f63fbfccb459235b6560554 + pristine_git_object: 575cbd42297f02a54542c7eda3a4cabaa28dda23 + src/mistralai/client/models/websearchpremiumtool.py: + id: bfe88af887e3 + last_write_checksum: sha1:ceb073d3b3916b2ff8f7b7e5eb01692893024d68 + pristine_git_object: 00d4a4b427331660d29513ec43e68fc7cf8afcfb + src/mistralai/client/models/websearchtool.py: + id: 26b0903423e5 + last_write_checksum: sha1:a07d7ace2d68c944c686e69053bef8d84231814b + pristine_git_object: 6871080f6279ef42a0525c1e26368baafc98fbb7 + src/mistralai/client/models_.py: + id: 1d277958a843 + last_write_checksum: sha1:b9ea906a7704aa57efe5d13ac547e502d961d3b5 + 
pristine_git_object: a287c413ddf48bd5ff7fc0a685e05d4bcdabb6e5 + src/mistralai/client/ocr.py: + id: 2f804a12fc62 + last_write_checksum: sha1:707d91582149e76a3109df8b1a58bfd44111a93d + pristine_git_object: a46119d1577036be57896a7ea3737ab508497e4f + src/mistralai/client/py.typed: + id: d95cd1565e33 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/client/sdk.py: + id: 48edbcb38d7e + last_write_checksum: sha1:365709e35dc4e450a2c4931e75dcbd04568ab361 + pristine_git_object: 80bf25a749eb3b36035aaafa15f059bcf403ec80 + src/mistralai/client/sdkconfiguration.py: + id: b7dd68a0235e + last_write_checksum: sha1:c6944f12c6fdc992d43db943b24c8c90854cde5e + pristine_git_object: 712e92e05c7fd3016431ec62ecb7b7789c8b7071 + src/mistralai/client/transcriptions.py: + id: 75b45780c978 + last_write_checksum: sha1:27a5b7dd6ed47b0f79b95fbb8599d439512ef344 + pristine_git_object: 7f01917d6e462cff9af75e70d32afbcc5958c7de + src/mistralai/client/types/__init__.py: + id: 000b943f821c + last_write_checksum: sha1:12a4ace69cbc63f1125eeddf901afed7cdf378b0 + pristine_git_object: cf83864312d8fed0bb9dd3ce39d373b040c36b2e + src/mistralai/client/types/basemodel.py: + id: 7ec465a1d3ff + last_write_checksum: sha1:b62a9d42d79a238399e04efbf5c02215c707efde + pristine_git_object: 4e889aa0ffbb4402e416a40fa6259334cb0a3c5c + src/mistralai/client/utils/__init__.py: + id: b69505f4b269 + last_write_checksum: sha1:98698da73839db7c258fd1afd45ccacff86c64be + pristine_git_object: 4bde281a1fd8c616d4b3529af0fcb79f57374310 + src/mistralai/client/utils/annotations.py: + id: 1ffdedfc66a2 + last_write_checksum: sha1:f86ba37de752e63076f25d53f9c54fce98d2a0bd + pristine_git_object: 4b60ab8e730e7093a064b6869c4a712b96e4aad8 + src/mistralai/client/utils/datetimes.py: + id: c40066d868c9 + last_write_checksum: sha1:412ca432d6f5a75b692a967bc6fc52e4f4eff7d5 + pristine_git_object: a2c94fac73ecbfb8acd8ed4f75692318e4f863ec + 
src/mistralai/client/utils/dynamic_imports.py: + id: ac9918d925c0 + last_write_checksum: sha1:93d3eac90a47a039e7a652ae120bec66be6c681a + pristine_git_object: 969f2fc71178ed2114640c8f0831f4f3acb25af8 + src/mistralai/client/utils/enums.py: + id: a0735873b5ac + last_write_checksum: sha1:fe05b6a21360b0eff1fc246e9a3ee01758521262 + pristine_git_object: d897495f053459106144501c67f2215251d52a27 + src/mistralai/client/utils/eventstreaming.py: + id: 3263d7502030 + last_write_checksum: sha1:24af3168dafe6b8d860cffb121fac11cd0e9d930 + pristine_git_object: 19a121529f180968f655baffbe446e5c1d6c2abb + src/mistralai/client/utils/forms.py: + id: 58842e905fce + last_write_checksum: sha1:d68ca0257e0e8bdc5cdc450f3e70a7ba789859f5 + pristine_git_object: 6facec5386675ccd5a26ff6093f98436a62fdf6b + src/mistralai/client/utils/headers.py: + id: 9066de2ead8b + last_write_checksum: sha1:bcd2f47b96bfaa54b3590c557a9267142d446be6 + pristine_git_object: 6491187230b5f11c7ff13396891ac69099a73a79 + src/mistralai/client/utils/logger.py: + id: 745023607a1f + last_write_checksum: sha1:2582e0cb889b6293c12ce9671aba6281d46bad44 + pristine_git_object: 3edad8307ea0ef38e857596a3ec11023a4af287f + src/mistralai/client/utils/metadata.py: + id: d49d535ae52c + last_write_checksum: sha1:54d300a665d3d5eafcc778a795d79347479b8337 + pristine_git_object: d46ffa59952926b7b1a842b0db2475527eda87df + src/mistralai/client/utils/queryparams.py: + id: bb77d4664844 + last_write_checksum: sha1:d02ce5b2dcc26edb7c937d75b98b70c22a5af189 + pristine_git_object: 0b78c548233f32afa2aafe0040ebb120b51532e8 + src/mistralai/client/utils/requestbodies.py: + id: 946cfcd26ee4 + last_write_checksum: sha1:8cac30839193ee0bb02975b0e225eab97adf4fd1 + pristine_git_object: 3aae69c7cf618776daec8bd46f9116b06c25e837 + src/mistralai/client/utils/retries.py: + id: 5f1a5b90423c + last_write_checksum: sha1:bbf8e376c1c801911e65e33566d3a142f46133f9 + pristine_git_object: bea1304150e77ca06185efb7db7798aaacd5e623 + src/mistralai/client/utils/security.py: + id: 
1acb7c006265 + last_write_checksum: sha1:3981f6571daf28b3b553beb09a4ebeeeb6ceff14 + pristine_git_object: d8b9d8fe746babd0a87846812b1f4117d1a46de2 + src/mistralai/client/utils/serializers.py: + id: 53c57c7f29a8 + last_write_checksum: sha1:8a3a15cf273034261111f2559cacbb579e17cb1b + pristine_git_object: fbc2772dc4284775be92de6a086c1eade9376417 + src/mistralai/client/utils/unions.py: + id: d23713342634 + last_write_checksum: sha1:f814d757474f039199f501aa53cdfba97a8c6645 + pristine_git_object: 14ef1bd5c5abef9bd5f2a3a4ee2f79e954c67e7e + src/mistralai/client/utils/unmarshal_json_response.py: + id: b13585fc5626 + last_write_checksum: sha1:372a01f5abf034ddbe5d4a3fc68e9e397f86085a + pristine_git_object: 624433c4dd42c9fb1bfae363becc76c62e390e14 + src/mistralai/client/utils/url.py: + id: 3c6496c17510 + last_write_checksum: sha1:c64be472d29cf229f2b91102808dcb741371c227 + pristine_git_object: 27a6a3a05287ff8a4e24e379ae5d20280c2caf30 + src/mistralai/client/utils/values.py: + id: bb6ade7a7f82 + last_write_checksum: sha1:da9ce43ad241db386efd9b2f53d81eb051dd7544 + pristine_git_object: 2469a9f310a37a7170b54853715274f13d38901c +examples: + list_models_v1_models_get: + speakeasy-default-list-models-v1-models-get: + responses: + "200": + application/json: {"object": "list"} + "422": + application/json: {} + userExample: + responses: + "200": + application/json: {"object": "list"} + retrieve_model_v1_models__model_id__get: + speakeasy-default-retrieve-model-v1-models-model-id-get: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": 
"", "archived": false} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + delete_model_v1_models__model_id__delete: + speakeasy-default-delete-model-v1-models-model-id-delete: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + jobs_api_routes_fine_tuning_update_fine_tuned_model: + speakeasy-default-jobs-api-routes-fine-tuning-update-fine-tuned-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": "completion"} + jobs_api_routes_fine_tuning_archive_fine_tuned_model: + speakeasy-default-jobs-api-routes-fine-tuning-archive-fine-tuned-model: + 
parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": true} + jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: + speakeasy-default-jobs-api-routes-fine-tuning-unarchive-fine-tuned-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": false} + agents_api_v1_conversations_start: + speakeasy-default-agents-api-v1-conversations-start: + requestBody: + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_list: + speakeasy-default-agents-api-v1-conversations-list: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"object": "conversation", "id": "", "created_at": "2025-11-20T22:30:47.754Z", "updated_at": "2025-08-05T08:36:20.296Z", "agent_id": ""}] + "422": + application/json: {} + agents_api_v1_conversations_get: + speakeasy-default-agents-api-v1-conversations-get: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2023-06-02T14:00:42.201Z", "updated_at": "2024-10-06T17:16:50.325Z", "agent_id": ""} + "422": + application/json: {} + agents_api_v1_conversations_append: + speakeasy-default-agents-api-v1-conversations-append: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"stream": false, "store": true, "handoff_execution": "server", 
"completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_history: + speakeasy-default-agents-api-v1-conversations-history: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "tool.execution", "name": "image_generation", "arguments": ""}]} + "422": + application/json: {} + agents_api_v1_conversations_messages: + speakeasy-default-agents-api-v1-conversations-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + "422": + application/json: {} + agents_api_v1_conversations_restart: + speakeasy-default-agents-api-v1-conversations-restart: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_start_stream: + speakeasy-default-agents-api-v1-conversations-start-stream: + requestBody: + application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true, "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + agents_api_v1_conversations_append_stream: + speakeasy-default-agents-api-v1-conversations-append-stream: + parameters: + path: + conversation_id: "" + 
requestBody: + application/json: {"stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + agents_api_v1_conversations_restart_stream: + speakeasy-default-agents-api-v1-conversations-restart-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "422": + application/json: {} + agents_api_v1_agents_create: + speakeasy-default-agents-api-v1-agents-create: + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "LeBaron", "name": ""} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "versions": [818563, 316961], "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + agents_api_v1_agents_list: + speakeasy-default-agents-api-v1-agents-list: + parameters: + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "versions": [43153, 439473], "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z", "deployment_chat": true, "source": ""}] + "422": + application/json: {} + agents_api_v1_agents_get: + speakeasy-default-agents-api-v1-agents-get: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "versions": [845972, 878771, 621094], "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z", 
"deployment_chat": false, "source": ""} + "422": + application/json: {} + agents_api_v1_agents_update: + speakeasy-default-agents-api-v1-agents-update: + parameters: + path: + agent_id: "" + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model X", "name": "", "object": "agent", "id": "", "version": 799821, "versions": [799821, 934063], "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + agents_api_v1_agents_update_version: + speakeasy-default-agents-api-v1-agents-update-version: + parameters: + path: + agent_id: "" + query: + version: 157995 + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, "versions": [], "created_at": "2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + files_api_routes_upload_file: + speakeasy-default-files-api-routes-upload-file: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"} + userExample: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341"} + 
files_api_routes_list_files: + speakeasy-default-files-api-routes-list-files: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: {"data": [{"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload"}], "object": "", "total": 999335} + userExample: + parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} + files_api_routes_retrieve_file: + speakeasy-default-files-api-routes-retrieve-file: + parameters: + path: + file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "instruct", "source": "repository", "deleted": false} + userExample: + parameters: + path: + file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} + files_api_routes_delete_file: + 
speakeasy-default-files-api-routes-delete-file: + parameters: + path: + file_id: "3b6d45eb-e30b-416f-8019-f47e2e93d930" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} + userExample: + parameters: + path: + file_id: "3b6d45eb-e30b-416f-8019-f47e2e93d930" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} + files_api_routes_download_file: + speakeasy-default-files-api-routes-download-file: + parameters: + path: + file_id: "f8919994-a4a1-46b2-8b5b-06335a4300ce" + responses: + "200": + application/octet-stream: "x-file: example.file" + files_api_routes_get_signed_url: + speakeasy-default-files-api-routes-get-signed-url: + parameters: + path: + file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://knotty-birdcage.net/"} + userExample: + parameters: + path: + file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} + jobs_api_routes_fine_tuning_get_fine_tuning_jobs: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + responses: + "200": + application/json: {"object": "list", "total": 843585} + jobs_api_routes_fine_tuning_create_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: + requestBody: + application/json: {"model": "Camaro", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + responses: + "200": + application/json: {"expected_duration_seconds": 220, "cost": 10, 
"cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} + jobs_api_routes_fine_tuning_get_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: + parameters: + path: + job_id: "c167a961-ffca-4bcf-93ac-6169468dd389" + responses: + "200": + application/json: {"id": "babac92a-96fa-48c4-931c-f6f97e1bf24c", "auto_start": false, "model": "Spyder", "status": "FAILED", "created_at": 232438, "modified_at": 32259, "training_files": ["7a95c5a0-399d-4665-84c8-deab766d22dc"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 5651, "loss_function": "single_class"}]} + jobs_api_routes_fine_tuning_cancel_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: + parameters: + path: + job_id: "6188a2f6-7513-4e0f-89cc-3f8088523a49" + responses: + "200": + application/json: {"id": "770b9cc0-1ab6-44de-a816-67010644e9fb", "auto_start": false, "model": "Volt", "status": "CANCELLATION_REQUESTED", "created_at": 546404, "modified_at": 180081, "training_files": ["45e621c6-ac30-4133-b6d1-fc0d1fe24c9f"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [""], "weight": 1298.58, "loss_function": "multi_class"}]} + jobs_api_routes_fine_tuning_start_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: + parameters: + path: + job_id: "56553e4d-0679-471e-b9ac-59a77d671103" + responses: + "200": + application/json: {"id": "68ad461a-676e-47fe-a07e-15e38f5082b5", "auto_start": false, "model": "Grand Cherokee", "status": "STARTED", "created_at": 134515, "modified_at": 192651, "training_files": ["39dabc3d-15eb-49ac-a549-69973f33acee"], "object": "job", "job_type": "completion", 
"hyperparameters": {"learning_rate": 0.0001}} + jobs_api_routes_batch_get_batch_jobs: + speakeasy-default-jobs-api-routes-batch-get-batch-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + order_by: "-created" + responses: + "200": + application/json: {"object": "list", "total": 186589} + jobs_api_routes_batch_create_batch_job: + speakeasy-default-jobs-api-routes-batch-create-batch-job: + requestBody: + application/json: {"endpoint": "/v1/moderations", "model": "mistral-small-latest", "timeout_hours": 24} + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} + jobs_api_routes_batch_get_batch_job: + speakeasy-default-jobs-api-routes-batch-get-batch-job: + parameters: + path: + job_id: "4017dc9f-b629-42f4-9700-8c681b9e7f0f" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} + jobs_api_routes_batch_cancel_batch_job: + speakeasy-default-jobs-api-routes-batch-cancel-batch-job: + parameters: + path: + job_id: "4fb29d1c-535b-4f0a-a1cb-2167f86da569" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} + chat_completion_v1_chat_completions_post: + 
speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + fim_completion_v1_fim_completions_post: + speakeasy-default-fim-completion-v1-fim-completions-post: + requestBody: + application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. 
Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} + agents_completion_v1_agents_completions_post: + speakeasy-default-agents-completion-v1-agents-completions-post: + requestBody: + application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} + responses: + "200": + application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"role": "assistant", "content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} + stream_agents: + speakeasy-default-stream-agents: + requestBody: + application/json: {"stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} + responses: + "422": + application/json: {} + embeddings_v1_embeddings_post: + speakeasy-default-embeddings-v1-embeddings-post: + requestBody: + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "list", "model": "mistral-embed", "usage": {"prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15, "prompt_audio_seconds": null}, "data": [{"object": "embedding", "embedding": [-0.016632080078125, 0.0701904296875, 0.03143310546875, 
0.01309967041015625, 0.0202789306640625], "index": 0}, {"object": "embedding", "embedding": [-0.0230560302734375, 0.039337158203125, 0.0521240234375, -0.0184783935546875, 0.034271240234375], "index": 1}]} + moderations_v1_moderations_post: + speakeasy-default-moderations-v1-moderations-post: + requestBody: + application/json: {"model": "Durango", "input": ["", ""]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Corvette", "results": [{}]} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-moderation-latest", "input": ""} + responses: + "200": + application/json: {"id": "4d71ae510af942108ef7344f903e2b88", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.0011335690505802631, "hate_and_discrimination": 0.0030753696337342262, "violence_and_threats": 0.0003569706459529698, "dangerous_and_criminal_content": 0.002251847181469202, "selfharm": 0.00017952796770259738, "health": 0.0002780309587251395, "financial": 0.00008481103577651083, "law": 0.00004539786823443137, "pii": 0.0023967307060956955}}, {"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.000626334105618298, "hate_and_discrimination": 0.0013670255430042744, "violence_and_threats": 0.0002611903182696551, "dangerous_and_criminal_content": 0.0030753696337342262, "selfharm": 0.00010889690747717395, "health": 0.00015843621804378927, "financial": 0.000191104321856983, "law": 0.00004006369272246957, "pii": 0.0035936026833951473}}]} + 
chat_moderations_v1_chat_moderations_post: + speakeasy-default-chat-moderations-v1-chat-moderations-post: + requestBody: + application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Explorer", "results": [{}]} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"input": [{"role": "tool", "content": ""}], "model": "LeBaron"} + responses: + "200": + application/json: {"id": "352bce1a55814127a3b0bc4fb8f02a35", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.0010322310263291001, "hate_and_discrimination": 0.001597845577634871, "violence_and_threats": 0.00020342698553577065, "dangerous_and_criminal_content": 0.0029810327105224133, "selfharm": 0.00017952796770259738, "health": 0.0002959570847451687, "financial": 0.000079673009167891, "law": 0.00004539786823443137, "pii": 0.004198795650154352}}]} + classifications_v1_classifications_post: + speakeasy-default-classifications-v1-classifications-post: + requestBody: + application/json: {"model": "mistral-moderation-latest", "input": [""]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "ATS", "results": [{}, {"key": {"scores": {"key": 2080.19}}}]} + "422": + application/json: {} + chat_classifications_v1_chat_classifications_post: + speakeasy-default-chat-classifications-v1-chat-classifications-post: + requestBody: + application/json: {"model": "Camry", "input": [{"messages": [{"role": "system", "content": ""}]}]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Altima", "results": [{}, {"key": {"scores": {"key": 1360.53, 
"key1": 5946.42}}}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}]} + "422": + application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "CX-9", "document": {"image_url": {"url": "https://measly-scrap.com"}, "type": "image_url"}} + responses: + "200": + application/json: {"pages": [{"index": 944919, "markdown": "", "images": [], "dimensions": {"dpi": 984283, "height": 453411, "width": 398292}}], "model": "Wrangler", "usage_info": {"pages_processed": 47064}} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"type": "document_url", "document_url": "https://upset-labourer.net/"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} + libraries_list_v1: + speakeasy-default-libraries-list-v1: + responses: + "200": + application/json: {"data": [{"id": "bfc452fd-4bcb-46ec-9f68-ceea101e924d", "name": "", "created_at": "2024-01-31T13:50:47.409Z", "updated_at": "2023-04-09T15:28:24.261Z", "owner_id": "3fb92cf9-0fea-44d0-958f-16963601a1f0", "owner_type": "", "total_size": 811051, "nb_documents": 634577, "chunk_size": 502060}]} + libraries_create_v1: + speakeasy-default-libraries-create-v1: + requestBody: + application/json: {"name": ""} + responses: + "201": + application/json: {"id": "7285d921-bbab-471e-a2df-600e096d8aca", "name": "", "created_at": "2025-12-10T18:12:15.618Z", "updated_at": "2023-12-29T15:14:03.343Z", "owner_id": "d5e2af8f-c98a-479e-aece-62d79ea6bab3", "owner_type": "", "total_size": 866940, "nb_documents": 123652, "chunk_size": 274694} + "422": + application/json: {} + libraries_get_v1: + speakeasy-default-libraries-get-v1: + parameters: + path: + library_id: "d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f" + 
responses: + "200": + application/json: {"id": "24e6ac5e-61cb-4f2c-b0c0-806dfd5d8dbf", "name": "", "created_at": "2023-01-19T09:20:07.756Z", "updated_at": "2023-05-28T00:39:57.656Z", "owner_id": "546a730e-7d06-4324-a4fd-2b7ff127978c", "owner_type": "", "total_size": 191122, "nb_documents": 932135, "chunk_size": null} + "422": + application/json: {} + libraries_delete_v1: + speakeasy-default-libraries-delete-v1: + parameters: + path: + library_id: "6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a" + responses: + "200": + application/json: {"id": "04e83772-3f8e-41d3-a053-763ed9937e07", "name": "", "created_at": "2025-03-15T23:45:26.060Z", "updated_at": "2024-08-03T06:23:12.129Z", "owner_id": "f636aa46-e1d5-4df4-966b-de4af27da6db", "owner_type": "", "total_size": 268102, "nb_documents": 821714, "chunk_size": null} + "422": + application/json: {} + libraries_update_v1: + speakeasy-default-libraries-update-v1: + parameters: + path: + library_id: "e01880c3-d0b5-4a29-8b1b-abdb8ce917e4" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "c3bb20a7-df8c-4461-8cfb-9e2a978c00da", "name": "", "created_at": "2025-03-12T04:49:28.349Z", "updated_at": "2025-12-18T03:09:25.092Z", "owner_id": "734e66b8-ae70-4069-9ebb-7eb7ee3967d5", "owner_type": "", "total_size": 762363, "nb_documents": 896591, "chunk_size": 507889} + "422": + application/json: {} + libraries_documents_list_v1: + speakeasy-default-libraries-documents-list-v1: + parameters: + path: + library_id: "5c3ca4cd-62bc-4c71-ad8a-1531ae80d078" + query: + page_size: 100 + page: 0 + sort_by: "created_at" + sort_order: "desc" + responses: + "200": + application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": 
"2024-09-24T04:50:43.988Z", "process_status": "noop", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 957230}]} + "422": + application/json: {} + libraries_documents_upload_v1: + speakeasy-default-libraries-documents-upload-v1: + parameters: + path: + library_id: "a02150d9-5ee0-4877-b62c-28b1fcdf3b76" + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "process_status": "in_progress", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} + "422": + application/json: {} + libraries_documents_get_v1: + speakeasy-default-libraries-documents-get-v1: + parameters: + path: + library_id: "03d908c8-90a1-44fd-bf3a-8490fb7c9a03" + document_id: "90973aec-0508-4375-8b00-91d732414745" + responses: + "200": + application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "process_status": "self_managed", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} + "422": + application/json: {} + libraries_documents_update_v1: + speakeasy-default-libraries-documents-update-v1: + parameters: + path: + library_id: "3ddd8d93-dca5-4a6d-980d-173226c35742" + document_id: "2a25e44c-b160-40ca-b5c2-b65fb2fcae34" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", 
"hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "process_status": "missing_content", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} + "422": + application/json: {} + libraries_documents_delete_v1: + speakeasy-default-libraries-documents-delete-v1: + parameters: + path: + library_id: "005daae9-d42e-407d-82d7-2261c6a1496c" + document_id: "edc236b0-baff-49a9-884b-4ca36a258da4" + responses: + "422": + application/json: {} + libraries_documents_get_text_content_v1: + speakeasy-default-libraries-documents-get-text-content-v1: + parameters: + path: + library_id: "1d177215-3b6b-45ba-9fa9-baf773223bec" + document_id: "60214c91-2aba-4692-a4e6-a53365de8caf" + responses: + "200": + application/json: {"text": ""} + "422": + application/json: {} + libraries_documents_get_status_v1: + speakeasy-default-libraries-documents-get-status-v1: + parameters: + path: + library_id: "e6906f70-368f-4155-80da-c1718f01bc43" + document_id: "2c904915-d831-4e9d-a345-8ce405bcef66" + responses: + "200": + application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "process_status": "waiting_for_capacity", "processing_status": ""} + "422": + application/json: {} + libraries_documents_get_signed_url_v1: + speakeasy-default-libraries-documents-get-signed-url-v1: + parameters: + path: + library_id: "23cf6904-a602-4ee8-9f5b-8efc557c336d" + document_id: "48598486-df71-4994-acbb-1133c72efa8c" + responses: + "200": + application/json: "https://burdensome-jellyfish.name" + "422": + application/json: {} + libraries_documents_get_extracted_text_signed_url_v1: + speakeasy-default-libraries-documents-get-extracted-text-signed-url-v1: + parameters: + path: + library_id: "a6f15de3-1e82-4f95-af82-851499042ef8" + document_id: "9749d4f9-24e5-4ca2-99a3-a406863f805d" + responses: + "200": + 
application/json: "https://athletic-disadvantage.info" + "422": + application/json: {} + libraries_documents_reprocess_v1: + speakeasy-default-libraries-documents-reprocess-v1: + parameters: + path: + library_id: "51b29371-de8f-4ba4-932b-a0bafb3a7f64" + document_id: "3052422c-49ca-45ac-a918-cadb35d61fd8" + responses: + "422": + application/json: {} + libraries_share_list_v1: + speakeasy-default-libraries-share-list-v1: + parameters: + path: + library_id: "d2169833-d8e2-416e-a372-76518d3d99c2" + responses: + "200": + application/json: {"data": [{"library_id": "26c08a7a-d226-4d29-b4d8-c08f0ad41dd1", "org_id": "254e3633-51b9-47a9-bc14-466ecf29d167", "role": "", "share_with_type": "", "share_with_uuid": "815eb88e-1f97-4782-863f-5fd00d37268b"}]} + "422": + application/json: {} + libraries_share_create_v1: + speakeasy-default-libraries-share-create-v1: + parameters: + path: + library_id: "36de3a24-5b1c-4c8f-9d84-d5642205a976" + requestBody: + application/json: {"level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} + responses: + "200": + application/json: {"library_id": "45b3a5b2-8b81-4453-9130-ded7f1e5a366", "org_id": "0fa6e542-f04b-431e-a1be-76a9a92b0e68", "role": "", "share_with_type": "", "share_with_uuid": "cdbcc0c5-e577-4880-8ed3-f919421d4fc5"} + "422": + application/json: {} + libraries_share_delete_v1: + speakeasy-default-libraries-share-delete-v1: + parameters: + path: + library_id: "709e3cad-9fb2-4f4e-bf88-143cf1808107" + requestBody: + application/json: {"share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} + responses: + "200": + application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": "618c78f1-41ca-45c3-8ef2-7d78898c7061"} + "422": + application/json: {} + audio_api_v1_transcriptions_post: + 
speakeasy-default-audio-api-v1-transcriptions-post: + requestBody: + multipart/form-data: {"model": "Model X", "stream": false} + responses: + "200": + application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} + userExample: + requestBody: + multipart/form-data: {"model": "voxtral-mini-latest", "stream": false, "diarize": false} + responses: + "200": + application/json: {"model": "voxtral-mini-2507", "text": "This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms, in schools, at farms and on factory floors, at diners and on distant military outposts, All these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better President, and you made me a better man.\nOver the course of these eight years, I've seen the goodness, the resilience, and the hope of the American people. I've seen neighbors looking out for each other as we rescued our economy from the worst crisis of our lifetimes. I've hugged cancer survivors who finally know the security of affordable health care. I've seen communities like Joplin rebuild from disaster, and cities like Boston show the world that no terrorist will ever break the American spirit. I've seen the hopeful faces of young graduates and our newest military officers. I've mourned with grieving families searching for answers. And I found grace in a Charleston church. I've seen our scientists help a paralyzed man regain his sense of touch, and our wounded warriors walk again. I've seen our doctors and volunteers rebuild after earthquakes and stop pandemics in their tracks. 
I've learned from students who are building robots and curing diseases, and who will change the world in ways we can't even imagine. I've seen the youngest of children remind us of our obligations to care for our refugees, to work in peace, and above all, to look out for each other.\nThat's what's possible when we come together in the slow, hard, sometimes frustrating, but always vital work of self-government. But we can't take our democracy for granted. All of us, regardless of party, should throw ourselves into the work of citizenship. Not just when there is an election. Not just when our own narrow interest is at stake. But over the full span of a lifetime. If you're tired of arguing with strangers on the Internet, try to talk with one in real life. If something needs fixing, lace up your shoes and do some organizing. If you're disappointed by your elected officials, then grab a clipboard, get some signatures, and run for office yourself.\nOur success depends on our participation, regardless of which way the pendulum of power swings. It falls on each of us to be guardians of our democracy, to embrace the joyous task we've been given to continually try to improve this great nation of ours. Because for all our outward differences, we all share the same proud title – citizen.\nIt has been the honor of my life to serve you as President. Eight years later, I am even more optimistic about our country's promise. And I look forward to working along your side as a citizen for all my days that remain.\nThanks, everybody. God bless you. 
And God bless the United States of America.\n", "segments": [], "usage": {"prompt_tokens": 4, "completion_tokens": 635, "total_tokens": 3264, "prompt_audio_seconds": 203}, "language": "en"} + audio_api_v1_transcriptions_post_stream: + speakeasy-default-audio-api-v1-transcriptions-post-stream: + requestBody: + multipart/form-data: {"model": "Camry", "stream": true, "diarize": false} + agents_api_v1_conversations_delete: + speakeasy-default-agents-api-v1-conversations-delete: + parameters: + path: + conversation_id: "" + responses: + "422": + application/json: {} + agents_api_v1_agents_delete: + speakeasy-default-agents-api-v1-agents-delete: + parameters: + path: + agent_id: "" + responses: + "422": + application/json: {} + agents_api_v1_agents_list_versions: + speakeasy-default-agents-api-v1-agents-list-versions: + parameters: + path: + agent_id: "" + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Prius", "name": "", "object": "agent", "id": "", "version": 866135, "versions": [849276], "created_at": "2024-07-03T17:01:49.200Z", "updated_at": "2026-06-15T18:44:26.883Z", "deployment_chat": true, "source": ""}] + "422": + application/json: {} + agents_api_v1_agents_get_version: + speakeasy-default-agents-api-v1-agents-get-version: + parameters: + path: + agent_id: "" + version: "788393" + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model 3", "name": "", "object": "agent", "id": "", "version": 377706, "versions": [658369, 642981], "created_at": "2024-10-02T23:01:15.980Z", "updated_at": "2026-12-22T00:55:26.568Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + agents_api_v1_agents_create_or_update_alias: + speakeasy-default-agents-api-v1-agents-create-or-update-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + version: 595141 + responses: + "200": + application/json: 
{"alias": "", "version": 768764, "created_at": "2026-12-28T00:40:21.715Z", "updated_at": "2025-09-01T12:54:58.254Z"} + "422": + application/json: {} + agents_api_v1_agents_list_version_aliases: + speakeasy-default-agents-api-v1-agents-list-version-aliases: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: [{"alias": "", "version": 318290, "created_at": "2025-10-02T20:25:32.322Z", "updated_at": "2026-11-19T02:58:37.894Z"}] + "422": + application/json: {} + ListModels: + userExample: + responses: + "200": + application/json: {"object": "list"} + RetrieveModel: + speakeasy-default-retrieve-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "base"} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Principal Implementation Assistant", "root": "", "archived": false} + DeleteModel: + speakeasy-default-delete-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + "422": + application/json: {} + 
userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + UpdateModel: + speakeasy-default-update-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "", "object": "model", "created": 76471, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "9765ed11-3bc9-49ff-a19d-06665406d404", "model_type": "completion"} + ArchiveModel: + speakeasy-default-archive-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": true} + UnarchiveModel: + speakeasy-default-unarchive-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": false} + StartConversation: + speakeasy-default-start-conversation: + requestBody: + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + ListConversations: + speakeasy-default-list-conversations: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "object": 
"conversation", "id": "", "created_at": "2026-05-02T18:35:22.595Z", "updated_at": "2024-04-15T10:58:56.705Z", "model": "Silverado"}] + "422": + application/json: {} + GetConversation: + speakeasy-default-get-conversation: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2026-10-30T16:36:24.274Z", "updated_at": "2026-03-08T22:30:16.213Z", "agent_id": ""} + "422": + application/json: {} + DeleteConversation: + speakeasy-default-delete-conversation: + parameters: + path: + conversation_id: "" + responses: + "422": + application/json: {} + AppendConversation: + speakeasy-default-append-conversation: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": {"key": "", "key1": ""}}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + GetConversationHistory: + speakeasy-default-get-conversation-history: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.history", "conversation_id": "", "entries": []} + "422": + application/json: {} + GetConversationMessages: + speakeasy-default-get-conversation-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + "422": + application/json: {} + RestartConversation: + speakeasy-default-restart-conversation: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", 
"completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + StartConversationStream: + speakeasy-default-start-conversation-stream: + requestBody: + application/json: {"inputs": "", "stream": true, "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + AppendConversationStream: + speakeasy-default-append-conversation-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + RestartConversationStream: + speakeasy-default-restart-conversation-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "422": + application/json: {} + CreateAgent: + speakeasy-default-create-agent: + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Mustang", "name": ""} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Taurus", "name": "", "object": "agent", "id": "", "version": 388058, "versions": [980917, 959600], "created_at": "2024-07-23T17:25:11.997Z", "updated_at": "2025-07-14T09:13:03.268Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + ListAgents: + speakeasy-default-list-agents: + parameters: + query: + page: 0 + page_size: 20 + responses: + "200": + 
application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Challenger", "name": "", "object": "agent", "id": "", "version": 679172, "versions": [491437], "created_at": "2026-05-11T12:36:32.958Z", "updated_at": "2026-08-23T04:04:31.448Z", "deployment_chat": false, "source": ""}] + "422": + application/json: {} + GetAgent: + speakeasy-default-get-agent: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "F-150", "name": "", "object": "agent", "id": "", "version": 928666, "versions": [246402], "created_at": "2024-02-28T12:05:26.160Z", "updated_at": "2024-05-16T04:31:56.940Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + UpdateAgent: + speakeasy-default-update-agent: + parameters: + path: + agent_id: "" + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "1", "name": "", "object": "agent", "id": "", "version": 388537, "versions": [955918, 365950, 823288], "created_at": "2026-11-04T08:06:14.896Z", "updated_at": "2025-05-23T04:44:27.181Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + DeleteAgent: + speakeasy-default-delete-agent: + parameters: + path: + agent_id: "" + responses: + "422": + application/json: {} + UpdateAgentVersion: + speakeasy-default-update-agent-version: + parameters: + path: + agent_id: "" + query: + version: 958693 + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 203502, "versions": [449666], "created_at": "2024-09-21T15:29:30.503Z", "updated_at": "2026-10-29T17:49:52.509Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + ListAgentVersions: + speakeasy-default-list-agent-versions: 
+ parameters: + path: + agent_id: "" + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Volt", "name": "", "object": "agent", "id": "", "version": 45747, "versions": [584697, 811109], "created_at": "2024-12-19T10:33:53.873Z", "updated_at": "2025-10-05T12:31:56.977Z", "deployment_chat": false, "source": ""}] + "422": + application/json: {} + GetAgentVersion: + speakeasy-default-get-agent-version: + parameters: + path: + agent_id: "" + version: "" + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Camaro", "name": "", "object": "agent", "id": "", "version": 663020, "versions": [210212], "created_at": "2026-11-16T03:32:55.781Z", "updated_at": "2026-09-28T23:51:49.611Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + CreateOrUpdateAgentAlias: + speakeasy-default-create-or-update-agent-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + version: 154719 + responses: + "200": + application/json: {"alias": "", "version": 991981, "created_at": "2025-09-02T11:06:53.872Z", "updated_at": "2024-12-17T11:05:04.936Z"} + "422": + application/json: {} + ListAgentAliases: + speakeasy-default-list-agent-aliases: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: [{"alias": "", "version": 345116, "created_at": "2025-03-19T21:46:52.564Z", "updated_at": "2026-07-18T22:23:53.218Z"}] + "422": + application/json: {} + DeleteAgentAlias: + speakeasy-default-delete-agent-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + responses: + "422": + application/json: {} + ListLibraries: + speakeasy-default-list-libraries: + responses: + "200": + application/json: {"data": []} + CreateLibrary: + speakeasy-default-create-library: + requestBody: + application/json: {"name": ""} + responses: + "201": + application/json: {"id": "7a160c5d-b74e-44df-8beb-aca6894fa845", 
"name": "", "created_at": "2026-05-19T08:13:56.459Z", "updated_at": "2026-06-02T23:02:36.047Z", "owner_id": null, "owner_type": "", "total_size": 236146, "nb_documents": 584286, "chunk_size": 369781} + "422": + application/json: {} + GetLibrary: + speakeasy-default-get-library: + parameters: + path: + library_id: "44e385d6-783e-4b21-8fae-5181e6817bc4" + responses: + "200": + application/json: {"id": "785b8f2b-04c4-4e51-aeee-10ba7210996d", "name": "", "created_at": "2026-06-02T21:55:32.209Z", "updated_at": "2026-06-28T12:11:02.341Z", "owner_id": "489e38bd-4195-4ab1-a06d-f1253bcc0e7a", "owner_type": "", "total_size": 733226, "nb_documents": 896348, "chunk_size": 594373} + "422": + application/json: {} + DeleteLibrary: + speakeasy-default-delete-library: + parameters: + path: + library_id: "441ba08a-3d1f-4700-8d6f-f32eeed49dff" + responses: + "200": + application/json: {"id": "a03c22a9-d4f2-4735-806c-b8497fe2a882", "name": "", "created_at": "2024-03-20T22:16:14.073Z", "updated_at": "2025-08-10T22:18:39.851Z", "owner_id": null, "owner_type": "", "total_size": 735078, "nb_documents": 443485, "chunk_size": 738927} + "422": + application/json: {} + UpdateLibrary: + speakeasy-default-update-library: + parameters: + path: + library_id: "27049553-3425-49ce-b965-fcb3a7ab03a3" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "0c44cb97-9c48-4e8b-9837-239b80130faf", "name": "", "created_at": "2025-02-22T01:07:38.404Z", "updated_at": "2024-01-02T09:35:39.994Z", "owner_id": "9ea3bb36-40f8-41f9-ba61-d6f71a725ff2", "owner_type": "", "total_size": 234996, "nb_documents": 664396, "chunk_size": 337104} + "422": + application/json: {} + ListDocuments: + speakeasy-default-list-documents: + parameters: + path: + library_id: "05e1bda5-99b1-4baf-bb03-905d8e094f74" + query: + page_size: 100 + page: 0 + sort_by: "created_at" + sort_order: "desc" + responses: + "200": + application/json: {"pagination": {"total_items": 985775, "total_pages": 196446, 
"current_page": 86746, "page_size": 671573, "has_more": false}, "data": [{"id": "9b168ce6-0e63-4d0a-b784-71cab0b43775", "library_id": "01d6c3ae-df9c-448d-8e84-873b6588d655", "hash": "", "mime_type": "", "extension": "shtml", "size": null, "name": "", "created_at": "2024-06-29T16:51:59.433Z", "processing_status": "", "uploaded_by_id": "ce40c587-9bb9-48d4-8bd3-5ce14f8f07c8", "uploaded_by_type": "", "tokens_processing_total": 288046}]} + "422": + application/json: {} + UploadDocument: + speakeasy-default-upload-document: + parameters: + path: + library_id: "f973c54e-979a-4464-9d36-8cc31beb21fe" + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "a13f4191-9721-413d-ac5c-b8edadbfb34e", "library_id": "a6ea3cdd-242f-4132-baf8-9a2589d78cb2", "hash": "", "mime_type": "", "extension": "mp4v", "size": 731796, "name": "", "created_at": "2024-04-30T08:38:55.667Z", "processing_status": "", "uploaded_by_id": "fd1426b3-90f8-4b54-97de-c4f108cb2a63", "uploaded_by_type": "", "tokens_processing_total": 603440} + "422": + application/json: {} + GetDocument: + speakeasy-default-get-document: + parameters: + path: + library_id: "f9902d0a-1ea4-4953-be48-52df6edd302a" + document_id: "c3e12fd9-e840-46f2-8d4a-79985ed36d24" + responses: + "200": + application/json: {"id": "52c93ba5-b31c-4717-a099-f3415e6d4eea", "library_id": "912f1e36-456c-4551-bd6d-535931a66817", "hash": "", "mime_type": "", "extension": "wav", "size": null, "name": "", "created_at": "2026-09-30T22:43:59.455Z", "processing_status": "", "uploaded_by_id": "8578215b-d0b4-4ee2-857d-dcb0686d45f1", "uploaded_by_type": "", "tokens_processing_total": 833979} + "422": + application/json: {} + UpdateDocument: + speakeasy-default-update-document: + parameters: + path: + library_id: "3b900c67-d2b6-4637-93f2-3eff2c85f8dd" + document_id: "66f935fd-37ec-441f-bca5-b1129befcbca" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": 
"6a5ec2ab-bd54-4cc8-a761-e51374243293", "library_id": "f8b3b9a7-bb4b-4b47-b3b2-bb6db5e92901", "hash": "", "mime_type": "", "extension": "gif", "size": null, "name": "", "created_at": "2025-11-09T02:41:11.680Z", "processing_status": "", "uploaded_by_id": "0f707dfd-bd39-42ad-9748-c0b305a13eb6", "uploaded_by_type": "", "tokens_processing_total": 170388} + "422": + application/json: {} + DeleteDocument: + speakeasy-default-delete-document: + parameters: + path: + library_id: "c728d742-7845-462b-84ad-2aacbaf1c7cf" + document_id: "ed3f5797-846a-4abe-8e30-39b2fd2323e0" + responses: + "422": + application/json: {} + GetDocumentTextContent: + speakeasy-default-get-document-text-content: + parameters: + path: + library_id: "12689dc1-50df-4a0d-8202-2757f7a8c141" + document_id: "9d4057e9-d112-437c-911e-6ee031389739" + responses: + "200": + application/json: {"text": ""} + "422": + application/json: {} + GetDocumentStatus: + speakeasy-default-get-document-status: + parameters: + path: + library_id: "41bb33c4-7e53-453d-bf21-398bb2862772" + document_id: "416b95cf-19c8-45af-84be-26aaa3ab3666" + responses: + "200": + application/json: {"document_id": "b5b43c40-8e91-41d9-933c-096ee588639a", "processing_status": ""} + "422": + application/json: {} + GetDocumentSignedUrl: + speakeasy-default-get-document-signed-url: + parameters: + path: + library_id: "2dbbe172-1374-41be-b03d-a088c733612e" + document_id: "b5d88764-47f1-4485-9df1-658775428344" + responses: + "200": + application/json: "" + "422": + application/json: {} + GetDocumentExtractedTextSignedUrl: + speakeasy-default-get-document-extracted-text-signed-url: + parameters: + path: + library_id: "46d040ce-ae2e-4891-a54c-cdab6a8f62d8" + document_id: "3eddbfe2-3fd7-47f5-984b-b378e6950e37" + responses: + "200": + application/json: "" + "422": + application/json: {} + ReprocessDocument: + speakeasy-default-reprocess-document: + parameters: + path: + library_id: "76d357e4-d891-40c6-9d1e-6d6ce5056ee0" + document_id: 
"09798d2b-8f46-46c6-9765-8054a82a4bb2" + responses: + "422": + application/json: {} + ListLibraryAccesses: + speakeasy-default-list-library-accesses: + parameters: + path: + library_id: "9eb628ef-f118-47eb-b3cc-9750c4ca5fb6" + responses: + "200": + application/json: {"data": [{"library_id": "98821ea0-f6e2-444d-b922-e649cd549a2a", "org_id": "a33230f8-b93d-4f45-80ce-b45e8dd8b5fe", "role": "", "share_with_type": "", "share_with_uuid": "0e1f6eb2-b59e-4e38-b916-382b3383c228"}]} + "422": + application/json: {} + UpdateOrCreateLibraryAccess: + speakeasy-default-update-or-create-library-access: + parameters: + path: + library_id: "88bb030c-1cb5-4231-ba13-742c56554876" + requestBody: + application/json: {"level": "Viewer", "share_with_uuid": "6a736283-c1fa-49b0-9b6d-ea9309c0a766", "share_with_type": "Workspace"} + responses: + "200": + application/json: {"library_id": "b783a30a-ca47-4c15-8095-dee3502846e5", "org_id": "6721ec8e-e0c0-4e8e-be83-3c01f2f884a5", "role": "", "share_with_type": "", "share_with_uuid": null} + "422": + application/json: {} + DeleteLibraryAccess: + speakeasy-default-delete-library-access: + parameters: + path: + library_id: "fc7ab1cf-e33c-4791-a6e0-95ff1f921c43" + requestBody: + application/json: {"share_with_uuid": "5818ddff-3568-40f1-a9e4-39d6cb9f5c94", "share_with_type": "Org"} + responses: + "200": + application/json: {"library_id": "6eeb1c0b-8c49-4745-8e3a-eef5bace0782", "org_id": "36550d6e-a514-4601-bd5b-7a0978aab0c7", "role": "", "share_with_type": "", "share_with_uuid": "023a9d84-8615-44a6-acd3-59b113a45c43"} + "422": + application/json: {} + UploadFile: + userExample: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", 
"signature": "d4821d2de1917341"} + ListFiles: + userExample: + parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} + RetrieveFile: + userExample: + parameters: + path: + file_id: "654a62d9-b7ee-49ac-835e-af4153e3c9ec" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} + DeleteFile: + userExample: + parameters: + path: + file_id: "789c27a4-69de-47c6-b67f-cf6e56ce9f41" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} + DownloadFile: + speakeasy-default-download-file: + parameters: + path: + file_id: "e2ba278e-eac9-4050-ae8e-ec433e124efb" + responses: + "200": + application/octet-stream: "x-file: example.file" + GetFileSignedUrl: + userExample: + parameters: + path: + file_id: "7a0c108d-9e6b-4c47-990d-a20cba50b283" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} + ListFineTuningJobs: + 
speakeasy-default-list-fine-tuning-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + responses: + "200": + application/json: {"object": "list", "total": 677316} + CreateFineTuningJob: + speakeasy-default-create-fine-tuning-job: + requestBody: + application/json: {"model": "Countach", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + responses: + "200": + application/json: {"id": "18371b47-e157-4d80-8d09-2687df8868e8", "auto_start": false, "model": "Fiesta", "status": "FAILED", "created_at": 475667, "modified_at": 452225, "training_files": [], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + GetFineTuningJob: + speakeasy-default-get-fine-tuning-job: + parameters: + path: + job_id: "2855f873-414e-4cf5-a46e-e589e39ee809" + responses: + "200": + application/json: {"id": "b9f4ad32-1400-4751-8e0d-16c09b4b26e6", "auto_start": true, "model": "LeBaron", "status": "QUEUED", "created_at": 458966, "modified_at": 377090, "training_files": ["52d812c3-b5fe-4866-878e-39a5910f91df"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 6490.15, "loss_function": "single_class"}]} + CancelFineTuningJob: + speakeasy-default-cancel-fine-tuning-job: + parameters: + path: + job_id: "ee7d6f03-fcbb-43ca-8f17-0388c0832eb9" + responses: + "200": + application/json: {"id": "24b50383-3de5-4711-a14f-b71bbeccc6c5", "auto_start": true, "model": "Countach", "status": "CANCELLED", "created_at": 148194, "modified_at": 80833, "training_files": ["13ba2c85-5db5-4c14-94e4-2fcf030cecae", "85892e4f-5c84-4f38-bfb8-01072484489c", "723f89f0-65c0-43fa-9a9f-296acfe91134"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": ["", ""], "weight": 1717.09, "loss_function": "single_class"}]} + StartFineTuningJob: + 
speakeasy-default-start-fine-tuning-job: + parameters: + path: + job_id: "da371429-0ec2-4cea-b9c7-73ce3a1dd76f" + responses: + "200": + application/json: {"id": "2628c0c5-a98f-4d0b-a22a-fba0b0b23112", "auto_start": false, "model": "Model 3", "status": "QUEUED", "created_at": 139851, "modified_at": 571341, "training_files": ["856f394d-d216-41ab-8fa1-a42fba9e7734"], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + ListBatchJobs: + speakeasy-default-list-batch-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + order_by: "-created" + responses: + "200": + application/json: {"object": "list", "total": 853018} + CreateBatchJob: + speakeasy-default-create-batch-job: + requestBody: + application/json: {"endpoint": "/v1/classifications", "model": "mistral-small-latest", "timeout_hours": 24} + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["936962bc-f885-485f-914e-fe90c1d312f9", "02e65e71-6f9f-4c39-9a54-bba0acb1e912", "4c5d848d-d86e-43cb-a795-1eaba0c96608"], "endpoint": "", "errors": [], "status": "SUCCESS", "created_at": 346291, "total_requests": 784915, "completed_requests": 663597, "succeeded_requests": 195848, "failed_requests": 688098} + GetBatchJob: + speakeasy-default-get-batch-job: + parameters: + path: + job_id: "358c80a1-79bd-43f0-8f0e-8186713aa3ba" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["782a7fa0-6ea1-4be9-bce9-9ff61f81530d"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 878725, "total_requests": 913781, "completed_requests": 964506, "succeeded_requests": 119373, "failed_requests": 490093} + CancelBatchJob: + speakeasy-default-cancel-batch-job: + parameters: + path: + job_id: "393537d7-8b33-4931-a289-7f61f8757eda" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["7309e534-200e-43a4-83c5-dc4c2a14c745"], "endpoint": "", 
"errors": [], "status": "FAILED", "created_at": 157212, "total_requests": 188914, "completed_requests": 685483, "succeeded_requests": 127060, "failed_requests": 428561} + agents_api_v1_agents_delete_alias: + speakeasy-default-agents-api-v1-agents-delete-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + responses: + "422": + application/json: {} +examplesVersion: 1.0.2 +generatedTests: {} +releaseNotes: | + ## Python SDK Changes: + * `mistral.beta.libraries.documents.list()`: `response.data[].process_status` **Added** + * `mistral.beta.libraries.documents.upload()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.get()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.update()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.status()`: `response.process_status` **Added** +generatedFiles: + - .gitattributes + - .vscode/settings.json + - USAGE.md + - docs/models/agent.md + - docs/models/agentconversation.md + - docs/models/agentconversationobject.md + - docs/models/agentcreationrequest.md + - docs/models/agentcreationrequesttools.md + - docs/models/agenthandoffdoneevent.md + - docs/models/agenthandoffdoneeventtype.md + - docs/models/agenthandoffentry.md + - docs/models/agenthandoffentryobject.md + - docs/models/agenthandoffentrytype.md + - docs/models/agenthandoffstartedevent.md + - docs/models/agenthandoffstartedeventtype.md + - docs/models/agentobject.md + - docs/models/agentsapiv1agentsdeleterequest.md + - docs/models/agentsapiv1agentsgetrequest.md + - docs/models/agentsapiv1agentslistrequest.md + - docs/models/agentsapiv1agentsupdaterequest.md + - docs/models/agentsapiv1agentsupdateversionrequest.md + - docs/models/agentsapiv1conversationsappendrequest.md + - docs/models/agentsapiv1conversationsappendstreamrequest.md + - docs/models/agentsapiv1conversationsdeleterequest.md + - docs/models/agentsapiv1conversationsgetrequest.md + - 
docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md + - docs/models/agentsapiv1conversationshistoryrequest.md + - docs/models/agentsapiv1conversationslistrequest.md + - docs/models/agentsapiv1conversationsmessagesrequest.md + - docs/models/agentsapiv1conversationsrestartrequest.md + - docs/models/agentsapiv1conversationsrestartstreamrequest.md + - docs/models/agentscompletionrequest.md + - docs/models/agentscompletionrequestmessages.md + - docs/models/agentscompletionrequeststop.md + - docs/models/agentscompletionrequesttoolchoice.md + - docs/models/agentscompletionstreamrequest.md + - docs/models/agentscompletionstreamrequestmessages.md + - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/agenttools.md + - docs/models/agentupdaterequest.md + - docs/models/agentupdaterequesttools.md + - docs/models/apiendpoint.md + - docs/models/archiveftmodelout.md + - docs/models/archiveftmodeloutobject.md + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md + - docs/models/assistantmessagerole.md + - docs/models/attributes.md + - docs/models/audiochunk.md + - docs/models/audiochunktype.md + - docs/models/audiotranscriptionrequest.md + - docs/models/audiotranscriptionrequeststream.md + - docs/models/basemodelcard.md + - docs/models/basemodelcardtype.md + - docs/models/batcherror.md + - docs/models/batchjobin.md + - docs/models/batchjobout.md + - docs/models/batchjoboutobject.md + - docs/models/batchjobsout.md + - docs/models/batchjobsoutobject.md + - docs/models/batchjobstatus.md + - docs/models/builtinconnectors.md + - docs/models/chatclassificationrequest.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequestmessages.md + - 
docs/models/chatcompletionstreamrequeststop.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/chatmoderationrequest.md + - docs/models/chatmoderationrequestinputs.md + - docs/models/checkpointout.md + - docs/models/classificationrequest.md + - docs/models/classificationrequestinputs.md + - docs/models/classificationresponse.md + - docs/models/classificationtargetresult.md + - docs/models/classifierdetailedjobout.md + - docs/models/classifierdetailedjoboutintegrations.md + - docs/models/classifierdetailedjoboutjobtype.md + - docs/models/classifierdetailedjoboutobject.md + - docs/models/classifierdetailedjoboutstatus.md + - docs/models/classifierftmodelout.md + - docs/models/classifierftmodeloutmodeltype.md + - docs/models/classifierftmodeloutobject.md + - docs/models/classifierjobout.md + - docs/models/classifierjoboutintegrations.md + - docs/models/classifierjoboutjobtype.md + - docs/models/classifierjoboutobject.md + - docs/models/classifierjoboutstatus.md + - docs/models/classifiertargetin.md + - docs/models/classifiertargetout.md + - docs/models/classifiertrainingparameters.md + - docs/models/classifiertrainingparametersin.md + - docs/models/codeinterpretertool.md + - docs/models/codeinterpretertooltype.md + - docs/models/completionargs.md + - docs/models/completionargsstop.md + - docs/models/completionchunk.md + - docs/models/completiondetailedjobout.md + - docs/models/completiondetailedjoboutintegrations.md + - docs/models/completiondetailedjoboutjobtype.md + - docs/models/completiondetailedjoboutobject.md + - docs/models/completiondetailedjoboutrepositories.md + - docs/models/completiondetailedjoboutstatus.md + - docs/models/completionevent.md + - docs/models/completionftmodelout.md + - docs/models/completionftmodeloutobject.md + - docs/models/completionjobout.md + - docs/models/completionjoboutobject.md + - docs/models/completionresponsestreamchoice.md + - docs/models/completionresponsestreamchoicefinishreason.md + - 
docs/models/completiontrainingparameters.md + - docs/models/completiontrainingparametersin.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/conversationappendrequest.md + - docs/models/conversationappendrequesthandoffexecution.md + - docs/models/conversationappendstreamrequest.md + - docs/models/conversationappendstreamrequesthandoffexecution.md + - docs/models/conversationevents.md + - docs/models/conversationeventsdata.md + - docs/models/conversationhistory.md + - docs/models/conversationhistoryobject.md + - docs/models/conversationinputs.md + - docs/models/conversationmessages.md + - docs/models/conversationmessagesobject.md + - docs/models/conversationrequest.md + - docs/models/conversationresponse.md + - docs/models/conversationresponseobject.md + - docs/models/conversationrestartrequest.md + - docs/models/conversationrestartrequesthandoffexecution.md + - docs/models/conversationrestartstreamrequest.md + - docs/models/conversationrestartstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequest.md + - docs/models/conversationstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequesttools.md + - docs/models/conversationusageinfo.md + - docs/models/data.md + - docs/models/deletefileout.md + - docs/models/deletemodelout.md + - docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/deltamessage.md + - docs/models/document.md + - docs/models/documentlibrarytool.md + - docs/models/documentlibrarytooltype.md + - docs/models/documentout.md + - docs/models/documenttextcontent.md + - docs/models/documentupdatein.md + - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md + - docs/models/embeddingdtype.md + - docs/models/embeddingrequest.md + - docs/models/embeddingrequestinputs.md + - docs/models/embeddingresponse.md + - docs/models/embeddingresponsedata.md + - docs/models/encodingformat.md + - docs/models/entitytype.md + - docs/models/entries.md + - docs/models/eventout.md + 
- docs/models/file.md + - docs/models/filechunk.md + - docs/models/filepurpose.md + - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/filesapiroutesdownloadfilerequest.md + - docs/models/filesapiroutesgetsignedurlrequest.md + - docs/models/filesapirouteslistfilesrequest.md + - docs/models/filesapiroutesretrievefilerequest.md + - docs/models/filesapiroutesuploadfilemultipartbodyparams.md + - docs/models/fileschema.md + - docs/models/filesignedurl.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finetuneablemodeltype.md + - docs/models/finishreason.md + - docs/models/format_.md + - docs/models/ftclassifierlossfunction.md + - docs/models/ftmodelcapabilitiesout.md + - docs/models/ftmodelcard.md + - docs/models/ftmodelcardtype.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functioncallentry.md + - docs/models/functioncallentryarguments.md + - docs/models/functioncallentryobject.md + - docs/models/functioncallentrytype.md + - docs/models/functioncallevent.md + - docs/models/functioncalleventtype.md + - docs/models/functionname.md + - docs/models/functionresultentry.md + - docs/models/functionresultentryobject.md + - docs/models/functionresultentrytype.md + - docs/models/functiontool.md + - docs/models/functiontooltype.md + - docs/models/githubrepositoryin.md + - docs/models/githubrepositoryintype.md + - docs/models/githubrepositoryout.md + - docs/models/githubrepositoryouttype.md + - docs/models/handoffexecution.md + - docs/models/httpvalidationerror.md + - docs/models/hyperparameters.md + - docs/models/imagegenerationtool.md + - docs/models/imagegenerationtooltype.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/inputentries.md + 
- docs/models/inputs.md + - docs/models/instructrequest.md + - docs/models/instructrequestinputs.md + - docs/models/instructrequestinputsmessages.md + - docs/models/instructrequestmessages.md + - docs/models/integrations.md + - docs/models/jobin.md + - docs/models/jobinintegrations.md + - docs/models/jobinrepositories.md + - docs/models/jobmetadataout.md + - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md + - docs/models/jobsout.md + - docs/models/jobsoutdata.md + - docs/models/jobsoutobject.md + - docs/models/jobtype.md + - docs/models/jsonschema.md + - docs/models/legacyjobmetadataout.md + - docs/models/legacyjobmetadataoutobject.md + - docs/models/librariesdeletev1request.md + - docs/models/librariesdocumentsdeletev1request.md + - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md + - docs/models/librariesdocumentsgetsignedurlv1request.md + - docs/models/librariesdocumentsgetstatusv1request.md + - docs/models/librariesdocumentsgettextcontentv1request.md + - docs/models/librariesdocumentsgetv1request.md + - 
docs/models/librariesdocumentslistv1request.md + - docs/models/librariesdocumentsreprocessv1request.md + - docs/models/librariesdocumentsupdatev1request.md + - docs/models/librariesdocumentsuploadv1documentupload.md + - docs/models/librariesdocumentsuploadv1request.md + - docs/models/librariesgetv1request.md + - docs/models/librariessharecreatev1request.md + - docs/models/librariessharedeletev1request.md + - docs/models/librariessharelistv1request.md + - docs/models/librariesupdatev1request.md + - docs/models/libraryin.md + - docs/models/libraryinupdate.md + - docs/models/libraryout.md + - docs/models/listdocumentout.md + - docs/models/listfilesout.md + - docs/models/listlibraryout.md + - docs/models/listsharingout.md + - docs/models/loc.md + - docs/models/messageentries.md + - docs/models/messageinputcontentchunks.md + - docs/models/messageinputentry.md + - docs/models/messageinputentrycontent.md + - docs/models/messageinputentryrole.md + - docs/models/messageinputentrytype.md + - docs/models/messageoutputcontentchunks.md + - docs/models/messageoutputentry.md + - docs/models/messageoutputentrycontent.md + - docs/models/messageoutputentryobject.md + - docs/models/messageoutputentryrole.md + - docs/models/messageoutputentrytype.md + - docs/models/messageoutputevent.md + - docs/models/messageoutputeventcontent.md + - docs/models/messageoutputeventrole.md + - docs/models/messageoutputeventtype.md + - docs/models/messages.md + - docs/models/metricout.md + - docs/models/mistralpromptmode.md + - docs/models/modelcapabilities.md + - docs/models/modelconversation.md + - docs/models/modelconversationobject.md + - docs/models/modelconversationtools.md + - docs/models/modellist.md + - docs/models/modeltype.md + - docs/models/moderationobject.md + - docs/models/moderationresponse.md + - docs/models/name.md + - docs/models/object.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - 
docs/models/ocrresponse.md + - docs/models/ocrtableobject.md + - docs/models/ocrusageinfo.md + - docs/models/one.md + - docs/models/outputcontentchunks.md + - docs/models/outputs.md + - docs/models/paginationinfo.md + - docs/models/prediction.md + - docs/models/processingstatusout.md + - docs/models/queryparamstatus.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/repositories.md + - docs/models/requestsource.md + - docs/models/response1.md + - docs/models/responsebody.md + - docs/models/responsedoneevent.md + - docs/models/responsedoneeventtype.md + - docs/models/responseerrorevent.md + - docs/models/responseerroreventtype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/responsestartedevent.md + - docs/models/responsestartedeventtype.md + - docs/models/retrievefileout.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md + - docs/models/role.md + - docs/models/sampletype.md + - docs/models/security.md + - docs/models/shareenum.md + - docs/models/sharingdelete.md + - docs/models/sharingin.md + - docs/models/sharingout.md + - docs/models/source.md + - docs/models/ssetypes.md + - docs/models/status.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md + - docs/models/tableformat.md + - docs/models/textchunk.md + - docs/models/textchunktype.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/timestampgranularity.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolexecutiondeltaevent.md + - docs/models/toolexecutiondeltaeventname.md + - docs/models/toolexecutiondeltaeventtype.md + - docs/models/toolexecutiondoneevent.md + - 
docs/models/toolexecutiondoneeventname.md + - docs/models/toolexecutiondoneeventtype.md + - docs/models/toolexecutionentry.md + - docs/models/toolexecutionentryobject.md + - docs/models/toolexecutionentrytype.md + - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventname.md + - docs/models/toolexecutionstartedeventtype.md + - docs/models/toolfilechunk.md + - docs/models/toolfilechunktype.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - docs/models/toolmessagerole.md + - docs/models/toolreferencechunk.md + - docs/models/toolreferencechunktype.md + - docs/models/tools.md + - docs/models/tooltypes.md + - docs/models/trainingfile.md + - docs/models/transcriptionresponse.md + - docs/models/transcriptionsegmentchunk.md + - docs/models/transcriptionstreamdone.md + - docs/models/transcriptionstreamdonetype.md + - docs/models/transcriptionstreamevents.md + - docs/models/transcriptionstreameventsdata.md + - docs/models/transcriptionstreameventtypes.md + - docs/models/transcriptionstreamlanguage.md + - docs/models/transcriptionstreamlanguagetype.md + - docs/models/transcriptionstreamsegmentdelta.md + - docs/models/transcriptionstreamsegmentdeltatype.md + - docs/models/transcriptionstreamtextdelta.md + - docs/models/transcriptionstreamtextdeltatype.md + - docs/models/two.md + - docs/models/type.md + - docs/models/unarchiveftmodelout.md + - docs/models/unarchiveftmodeloutobject.md + - docs/models/updateftmodelin.md + - docs/models/uploadfileout.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - docs/models/wandbintegration.md + - docs/models/wandbintegrationout.md + - docs/models/wandbintegrationouttype.md + - docs/models/wandbintegrationtype.md + - docs/models/websearchpremiumtool.md + - docs/models/websearchpremiumtooltype.md + - docs/models/websearchtool.md + 
- docs/models/websearchtooltype.md + - docs/sdks/accesses/README.md + - docs/sdks/agents/README.md + - docs/sdks/audio/README.md + - docs/sdks/batch/README.md + - docs/sdks/beta/README.md + - docs/sdks/chat/README.md + - docs/sdks/classifiers/README.md + - docs/sdks/conversations/README.md + - docs/sdks/documents/README.md + - docs/sdks/embeddings/README.md + - docs/sdks/files/README.md + - docs/sdks/fim/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - docs/sdks/libraries/README.md + - docs/sdks/mistral/README.md + - docs/sdks/mistralagents/README.md + - docs/sdks/mistraljobs/README.md + - docs/sdks/models/README.md + - docs/sdks/ocr/README.md + - docs/sdks/transcriptions/README.md + - poetry.toml + - py.typed + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai/__init__.py + - src/mistralai/_hooks/__init__.py + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/_version.py + - src/mistralai/accesses.py + - src/mistralai/agents.py + - src/mistralai/audio.py + - src/mistralai/basesdk.py + - src/mistralai/batch.py + - src/mistralai/beta.py + - src/mistralai/chat.py + - src/mistralai/classifiers.py + - src/mistralai/conversations.py + - src/mistralai/documents.py + - src/mistralai/embeddings.py + - src/mistralai/files.py + - src/mistralai/fim.py + - src/mistralai/fine_tuning.py + - src/mistralai/httpclient.py + - src/mistralai/jobs.py + - src/mistralai/libraries.py + - src/mistralai/mistral_agents.py + - src/mistralai/mistral_jobs.py + - src/mistralai/models/__init__.py + - src/mistralai/models/agent.py + - src/mistralai/models/agentconversation.py + - src/mistralai/models/agentcreationrequest.py + - src/mistralai/models/agenthandoffdoneevent.py + - src/mistralai/models/agenthandoffentry.py + - src/mistralai/models/agenthandoffstartedevent.py + - src/mistralai/models/agents_api_v1_agents_deleteop.py + - src/mistralai/models/agents_api_v1_agents_getop.py + - 
src/mistralai/models/agents_api_v1_agents_listop.py + - src/mistralai/models/agents_api_v1_agents_update_versionop.py + - src/mistralai/models/agents_api_v1_agents_updateop.py + - src/mistralai/models/agents_api_v1_conversations_append_streamop.py + - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_deleteop.py + - src/mistralai/models/agents_api_v1_conversations_getop.py + - src/mistralai/models/agents_api_v1_conversations_historyop.py + - src/mistralai/models/agents_api_v1_conversations_listop.py + - src/mistralai/models/agents_api_v1_conversations_messagesop.py + - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py + - src/mistralai/models/agents_api_v1_conversations_restartop.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/agentupdaterequest.py + - src/mistralai/models/apiendpoint.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/audiochunk.py + - src/mistralai/models/audiotranscriptionrequest.py + - src/mistralai/models/audiotranscriptionrequeststream.py + - src/mistralai/models/basemodelcard.py + - src/mistralai/models/batcherror.py + - src/mistralai/models/batchjobin.py + - src/mistralai/models/batchjobout.py + - src/mistralai/models/batchjobsout.py + - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/builtinconnectors.py + - src/mistralai/models/chatclassificationrequest.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/chatcompletionresponse.py + - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/chatmoderationrequest.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/classificationrequest.py + - src/mistralai/models/classificationresponse.py + - 
src/mistralai/models/classificationtargetresult.py + - src/mistralai/models/classifierdetailedjobout.py + - src/mistralai/models/classifierftmodelout.py + - src/mistralai/models/classifierjobout.py + - src/mistralai/models/classifiertargetin.py + - src/mistralai/models/classifiertargetout.py + - src/mistralai/models/classifiertrainingparameters.py + - src/mistralai/models/classifiertrainingparametersin.py + - src/mistralai/models/codeinterpretertool.py + - src/mistralai/models/completionargs.py + - src/mistralai/models/completionargsstop.py + - src/mistralai/models/completionchunk.py + - src/mistralai/models/completiondetailedjobout.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionftmodelout.py + - src/mistralai/models/completionjobout.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/completiontrainingparameters.py + - src/mistralai/models/completiontrainingparametersin.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/conversationappendrequest.py + - src/mistralai/models/conversationappendstreamrequest.py + - src/mistralai/models/conversationevents.py + - src/mistralai/models/conversationhistory.py + - src/mistralai/models/conversationinputs.py + - src/mistralai/models/conversationmessages.py + - src/mistralai/models/conversationrequest.py + - src/mistralai/models/conversationresponse.py + - src/mistralai/models/conversationrestartrequest.py + - src/mistralai/models/conversationrestartstreamrequest.py + - src/mistralai/models/conversationstreamrequest.py + - src/mistralai/models/conversationusageinfo.py + - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/deletemodelout.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/documentlibrarytool.py + - src/mistralai/models/documentout.py + - src/mistralai/models/documenttextcontent.py + - src/mistralai/models/documentupdatein.py + - 
src/mistralai/models/documenturlchunk.py + - src/mistralai/models/embeddingdtype.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/encodingformat.py + - src/mistralai/models/entitytype.py + - src/mistralai/models/eventout.py + - src/mistralai/models/file.py + - src/mistralai/models/filechunk.py + - src/mistralai/models/filepurpose.py + - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/files_api_routes_download_fileop.py + - src/mistralai/models/files_api_routes_get_signed_urlop.py + - src/mistralai/models/files_api_routes_list_filesop.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/filesignedurl.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodeltype.py + - src/mistralai/models/ftclassifierlossfunction.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/ftmodelcard.py + - src/mistralai/models/function.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/functioncallentry.py + - src/mistralai/models/functioncallentryarguments.py + - src/mistralai/models/functioncallevent.py + - src/mistralai/models/functionname.py + - src/mistralai/models/functionresultentry.py + - src/mistralai/models/functiontool.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imagegenerationtool.py + - src/mistralai/models/imageurl.py + - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputentries.py + - src/mistralai/models/inputs.py + - src/mistralai/models/instructrequest.py + - 
src/mistralai/models/jobin.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/jsonschema.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/libraries_delete_v1op.py + - src/mistralai/models/libraries_documents_delete_v1op.py + - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_status_v1op.py + - src/mistralai/models/libraries_documents_get_text_content_v1op.py + - src/mistralai/models/libraries_documents_get_v1op.py + - src/mistralai/models/libraries_documents_list_v1op.py + - src/mistralai/models/libraries_documents_reprocess_v1op.py + - src/mistralai/models/libraries_documents_update_v1op.py + - src/mistralai/models/libraries_documents_upload_v1op.py + - src/mistralai/models/libraries_get_v1op.py + - src/mistralai/models/libraries_share_create_v1op.py + - src/mistralai/models/libraries_share_delete_v1op.py + - src/mistralai/models/libraries_share_list_v1op.py + - 
src/mistralai/models/libraries_update_v1op.py + - src/mistralai/models/libraryin.py + - src/mistralai/models/libraryinupdate.py + - src/mistralai/models/libraryout.py + - src/mistralai/models/listdocumentout.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/listlibraryout.py + - src/mistralai/models/listsharingout.py + - src/mistralai/models/messageentries.py + - src/mistralai/models/messageinputcontentchunks.py + - src/mistralai/models/messageinputentry.py + - src/mistralai/models/messageoutputcontentchunks.py + - src/mistralai/models/messageoutputentry.py + - src/mistralai/models/messageoutputevent.py + - src/mistralai/models/metricout.py + - src/mistralai/models/mistralerror.py + - src/mistralai/models/mistralpromptmode.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modelconversation.py + - src/mistralai/models/modellist.py + - src/mistralai/models/moderationobject.py + - src/mistralai/models/moderationresponse.py + - src/mistralai/models/no_response_error.py + - src/mistralai/models/ocrimageobject.py + - src/mistralai/models/ocrpagedimensions.py + - src/mistralai/models/ocrpageobject.py + - src/mistralai/models/ocrrequest.py + - src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrtableobject.py + - src/mistralai/models/ocrusageinfo.py + - src/mistralai/models/outputcontentchunks.py + - src/mistralai/models/paginationinfo.py + - src/mistralai/models/prediction.py + - src/mistralai/models/processingstatusout.py + - src/mistralai/models/referencechunk.py + - src/mistralai/models/requestsource.py + - src/mistralai/models/responsedoneevent.py + - src/mistralai/models/responseerrorevent.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/responseformats.py + - src/mistralai/models/responsestartedevent.py + - src/mistralai/models/responsevalidationerror.py + - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/retrievefileout.py + - 
src/mistralai/models/sampletype.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/security.py + - src/mistralai/models/shareenum.py + - src/mistralai/models/sharingdelete.py + - src/mistralai/models/sharingin.py + - src/mistralai/models/sharingout.py + - src/mistralai/models/source.py + - src/mistralai/models/ssetypes.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/systemmessagecontentchunks.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/thinkchunk.py + - src/mistralai/models/timestampgranularity.py + - src/mistralai/models/tool.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/toolchoice.py + - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolexecutiondeltaevent.py + - src/mistralai/models/toolexecutiondoneevent.py + - src/mistralai/models/toolexecutionentry.py + - src/mistralai/models/toolexecutionstartedevent.py + - src/mistralai/models/toolfilechunk.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/toolreferencechunk.py + - src/mistralai/models/tooltypes.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/transcriptionresponse.py + - src/mistralai/models/transcriptionsegmentchunk.py + - src/mistralai/models/transcriptionstreamdone.py + - src/mistralai/models/transcriptionstreamevents.py + - src/mistralai/models/transcriptionstreameventtypes.py + - src/mistralai/models/transcriptionstreamlanguage.py + - src/mistralai/models/transcriptionstreamsegmentdelta.py + - src/mistralai/models/transcriptionstreamtextdelta.py + - src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/uploadfileout.py + - src/mistralai/models/usageinfo.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/websearchpremiumtool.py + - 
src/mistralai/models/websearchtool.py + - src/mistralai/models_.py + - src/mistralai/ocr.py + - src/mistralai/py.typed + - src/mistralai/sdk.py + - src/mistralai/sdkconfiguration.py + - src/mistralai/transcriptions.py + - src/mistralai/types/__init__.py + - src/mistralai/types/basemodel.py + - src/mistralai/utils/__init__.py + - src/mistralai/utils/annotations.py + - src/mistralai/utils/datetimes.py + - src/mistralai/utils/enums.py + - src/mistralai/utils/eventstreaming.py + - src/mistralai/utils/forms.py + - src/mistralai/utils/headers.py + - src/mistralai/utils/logger.py + - src/mistralai/utils/metadata.py + - src/mistralai/utils/queryparams.py + - src/mistralai/utils/requestbodies.py + - src/mistralai/utils/retries.py + - src/mistralai/utils/security.py + - src/mistralai/utils/serializers.py + - src/mistralai/utils/unmarshal_json_response.py + - src/mistralai/utils/url.py + - src/mistralai/utils/values.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml new file mode 100644 index 00000000..1c82d91a --- /dev/null +++ b/.speakeasy/gen.yaml @@ -0,0 +1,90 @@ +configVersion: 2.0.0 +generation: + sdkClassName: Mistral + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + sdkInitStyle: constructor + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + nameResolutionFeb2025: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + securityFeb2025: true + sharedErrorComponentsApr2025: true + sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true + auth: + oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + versioningStrategy: automatic + persistentEdits: + enabled: "true" + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false +python: + version: 2.0.0rc1 + additionalDependencies: + dev: 
+ pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + main: {} + allowedRedefinedBuiltins: + - id + - object + - input + - dir + asyncMode: both + authors: + - Mistral + baseErrorName: MistralError + clientServerStatusCodesAsErrors: true + constFieldCasing: normal + defaultErrorName: SDKError + description: Python Client SDK for the Mistral AI API. + enableCustomCodeRegions: true + enumFormat: union + envVarPrefix: MISTRAL + fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true + responseRequiredSep2024: true + flatAdditionalProperties: true + flattenGlobalSecurity: true + flattenRequests: true + flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only + imports: + option: openapi + paths: + callbacks: "" + errors: errors + operations: "" + shared: "" + webhooks: "" + inferUnionDiscriminators: true + inputModelSuffix: input + license: "" + maxMethodParams: 999 + methodArguments: infer-optional-args + moduleName: mistralai.client + multipartArrayFormat: standard + outputModelSuffix: output + packageManager: uv + packageName: mistralai + preApplyUnionDiscriminators: true + pytestFilterWarnings: [] + pytestTimeout: 0 + responseFormat: flat + sseFlatResponse: false + templateVersion: v2 + useAsyncHooks: false diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock new file mode 100644 index 00000000..0e0db8ba --- /dev/null +++ b/.speakeasy/workflow.lock @@ -0,0 +1,91 @@ +speakeasyVersion: 1.729.0 +sources: + mistral-azure-source: + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:0ac44fcf0cc7e7b8e101cb781a3699d5ccd37e0d80a5583c582ac7f0396803ac + sourceBlobDigest: sha256:4707ee1cbefa98516d39020e9cb29d7593fbd101a7d5c9c5fa50c44da3d4dce6 + tags: + - latest + - speakeasy-sdk-regen-1772457322 + mistral-google-cloud-source: + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: 
sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb + sourceBlobDigest: sha256:9fbc256371243c39284852f9c44ea59244205fe4265fddf867903d3eb60f17fd + tags: + - latest + - speakeasy-sdk-regen-1772455561 + mistral-openapi: + sourceNamespace: mistral-openapi + sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 + sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 + tags: + - latest + - speakeasy-sdk-regen-1772205200 +targets: + mistralai-azure-sdk: + source: mistral-azure-source + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:0ac44fcf0cc7e7b8e101cb781a3699d5ccd37e0d80a5583c582ac7f0396803ac + sourceBlobDigest: sha256:4707ee1cbefa98516d39020e9cb29d7593fbd101a7d5c9c5fa50c44da3d4dce6 + codeSamplesNamespace: mistral-openapi-azure-code-samples + codeSamplesRevisionDigest: sha256:67fa6ead333ffe784141b51b231bca05411189fd92f2e5fe3b9f861b97db5942 + mistralai-gcp-sdk: + source: mistral-google-cloud-source + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb + sourceBlobDigest: sha256:9fbc256371243c39284852f9c44ea59244205fe4265fddf867903d3eb60f17fd + codeSamplesNamespace: mistral-openapi-google-cloud-code-samples + codeSamplesRevisionDigest: sha256:79c3a6d89d6c6f01f0400a619531f4f1cf18875754ff488558c337535fd83ce9 + mistralai-sdk: + source: mistral-openapi + sourceNamespace: mistral-openapi + sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 + sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 + codeSamplesNamespace: mistral-openapi-code-samples + codeSamplesRevisionDigest: sha256:534088a1428d166f80e9669ec6bc67d277e22113c745ef8904789f0c6e6381d9 +workflow: + workflowVersion: 1.0.0 + speakeasyVersion: 1.729.0 + sources: + mistral-azure-source: + inputs: + - location: 
registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 + mistral-google-cloud-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 + mistral-openapi: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 + targets: + mistralai-azure-sdk: + target: python + source: mistral-azure-source + output: ./packages/azure + publish: + pypi: + token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure-code-samples + blocking: false + mistralai-gcp-sdk: + target: python + source: mistral-google-cloud-source + output: ./packages/gcp + publish: + pypi: + token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud-code-samples + blocking: false + mistralai-sdk: + target: python + source: mistral-openapi + publish: + pypi: + token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-code-samples + blocking: false diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml new file mode 100644 index 00000000..65d6d202 --- /dev/null +++ b/.speakeasy/workflow.yaml @@ -0,0 +1,45 @@ +workflowVersion: 1.0.0 +speakeasyVersion: 1.729.0 +sources: + mistral-azure-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 + mistral-google-cloud-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 + mistral-openapi: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 +targets: + mistralai-azure-sdk: + target: python + source: mistral-azure-source + output: ./packages/azure + publish: + pypi: + token: $pypi_token + codeSamples: + registry: + location: 
registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure-code-samples + blocking: false + mistralai-gcp-sdk: + target: python + source: mistral-google-cloud-source + output: ./packages/gcp + publish: + pypi: + token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud-code-samples + blocking: false + mistralai-sdk: + target: python + source: mistral-openapi + publish: + pypi: + token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-code-samples + blocking: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..d585717f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. 
+ +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! + +The Maintainers diff --git a/LICENSE b/LICENSE index 261eeb9e..bec12768 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2024 Mistral AI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 00000000..2fc3d13d --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,187 @@ +# Migration Guide + +This guide covers migrating between major versions of the Mistral Python SDK. + +--- + +## Migrating from v1.x to v2.x + +Version 2.0 updates the import paths from `mistralai` to `mistralai.client`. + +### Import Changes + +All imports move from `mistralai` to `mistralai.client`: + +```python +# v1 +from mistralai import Mistral +from mistralai.models import UserMessage, AssistantMessage +from mistralai.types import BaseModel + +# v2 +from mistralai.client import Mistral +from mistralai.client.models import UserMessage, AssistantMessage +from mistralai.client.types import BaseModel +``` + +### Quick Reference + +| v1 | v2 | +|---|---| +| `from mistralai import Mistral` | `from mistralai.client import Mistral` | +| `from mistralai.models import ...` | `from mistralai.client.models import ...` | +| `from mistralai.types import ...` | `from mistralai.client.types import ...` | +| `from mistralai.utils import ...` | `from mistralai.client.utils import ...` | + +### Azure & GCP Import Changes + +Azure and GCP SDKs now live under the `mistralai` namespace as separate distributions: + +| v1 | v2 | +|---|---| +| `from mistralai_azure import MistralAzure` | `from mistralai.azure.client import MistralAzure` | +| `from 
mistralai_azure.models import ...` | `from mistralai.azure.client.models import ...` | +| `from mistralai_gcp import MistralGoogleCloud` | `from mistralai.gcp.client import MistralGCP` | +| `from mistralai_gcp.models import ...` | `from mistralai.gcp.client.models import ...` | + +#### Installation Changes + +For GCP authentication dependencies, use `pip install "mistralai[gcp]"`. + +### What Stays the Same + +- The `Mistral` client API is unchanged +- All models (`UserMessage`, `AssistantMessage`, etc.) work the same way + +### Enums + +Enums now accept unknown values for forward compatibility with API changes. + +--- + +## Migrating from v0.x to v1.x + +Version 1.0 introduced significant changes to improve usability and consistency. + +> **Note:** The v1.x examples below use v1-style imports (e.g., `from mistralai import Mistral`). If you're on v2.x, combine these API changes with the [v1 to v2 import changes](#migrating-from-v1x-to-v2x) above. + +### Major Changes + +1. **Unified Client Class**: `MistralClient` and `MistralAsyncClient` consolidated into a single `Mistral` class +2. **Method Structure**: Methods reorganized into resource-based groups (e.g., `client.chat.complete()`) +3. **Message Classes**: `ChatMessage` replaced with typed classes (`UserMessage`, `AssistantMessage`, etc.) +4. 
**Streaming Response**: Stream chunks now accessed via `chunk.data.choices[0].delta.content` + +### Method Mapping + +#### Sync Methods + +| v0.x | v1.x | +|---|---| +| `MistralClient` | `Mistral` | +| `client.chat` | `client.chat.complete` | +| `client.chat_stream` | `client.chat.stream` | +| `client.completions` | `client.fim.complete` | +| `client.completions_stream` | `client.fim.stream` | +| `client.embeddings` | `client.embeddings.create` | +| `client.list_models` | `client.models.list` | +| `client.delete_model` | `client.models.delete` | +| `client.files.create` | `client.files.upload` | +| `client.jobs.create` | `client.fine_tuning.jobs.create` | +| `client.jobs.list` | `client.fine_tuning.jobs.list` | +| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | +| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | + +#### Async Methods + +| v0.x | v1.x | +|---|---| +| `MistralAsyncClient` | `Mistral` | +| `async_client.chat` | `client.chat.complete_async` | +| `async_client.chat_stream` | `client.chat.stream_async` | +| `async_client.completions` | `client.fim.complete_async` | +| `async_client.completions_stream` | `client.fim.stream_async` | +| `async_client.embeddings` | `client.embeddings.create_async` | +| `async_client.list_models` | `client.models.list_async` | +| `async_client.files.create` | `client.files.upload_async` | +| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | +| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | +| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | +| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | + +### Example: Non-Streaming Chat + +**v0.x:** +```python +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralClient(api_key=api_key) + +messages = [ChatMessage(role="user", content="What is the best French cheese?")] +response = client.chat(model="mistral-large-latest", 
messages=messages) + +print(response.choices[0].message.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) + +messages = [UserMessage(content="What is the best French cheese?")] +response = client.chat.complete(model="mistral-large-latest", messages=messages) + +print(response.choices[0].message.content) +``` + +### Example: Streaming Chat + +**v0.x:** +```python +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] + +for chunk in client.chat_stream(model="mistral-large-latest", messages=messages): + print(chunk.choices[0].delta.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] + +for chunk in client.chat.stream(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) # Note: chunk.data +``` + +### Example: Async Streaming + +**v0.x:** +```python +from mistralai.async_client import MistralAsyncClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralAsyncClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] + +async for chunk in client.chat_stream(model="mistral-large-latest", messages=messages): + print(chunk.choices[0].delta.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] + +async for chunk in await client.chat.stream_async(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) +``` diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..bba024ad --- /dev/null +++ b/Makefile @@ -0,0 
+1,34 @@ +.PHONY: help generate test-generate update-speakeasy-version check-config + +help: + @echo "Available targets:" + @echo " make generate Generate all SDKs (main, Azure, GCP)" + @echo " make test-generate Test SDK generation locally" + @echo " make update-speakeasy-version VERSION=x.y.z Update Speakeasy CLI version" + @echo " make check-config Check gen.yaml against recommended defaults" + @echo "" + @echo "Note: Production SDK generation is done via GitHub Actions:" + @echo " .github/workflows/sdk_generation_mistralai_sdk.yaml" + +# Generate all SDKs (main, Azure, GCP) +generate: + speakeasy run -t all + +# Test SDK generation locally. +# For production, use GitHub Actions: .github/workflows/sdk_generation_mistralai_sdk.yaml +# This uses the Speakeasy CLI version defined in .speakeasy/workflow.yaml +test-generate: + speakeasy run --skip-versioning + +# Check gen.yaml configuration against Speakeasy recommended defaults +check-config: + speakeasy configure generation check + +# Update the Speakeasy CLI version (the code generator tool). +# This modifies speakeasyVersion in .speakeasy/workflow.yaml and regenerates the SDK. +# Usage: make update-speakeasy-version VERSION=1.685.0 +update-speakeasy-version: +ifndef VERSION + $(error VERSION is required. This is the Speakeasy CLI version (e.g., 1.685.0)) +endif + uv run inv update-speakeasy --version "$(VERSION)" --targets "all" diff --git a/OLD-README.md b/OLD-README.md new file mode 100644 index 00000000..22967f91 --- /dev/null +++ b/OLD-README.md @@ -0,0 +1,62 @@ +# Mistral Python Client + +This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) + +You can use the Mistral Python client to interact with the Mistral AI API. + +## Installing + +```bash +pip install mistralai +``` + +### From Source + +This client uses `poetry` as a dependency and virtual environment manager. 
+ +You can install poetry with + +```bash +pip install poetry +``` + +`poetry` will set up a virtual environment and install dependencies with the following command: + +```bash +poetry install +``` + +## Run examples + +You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. + +### API Key Setup + +Running the examples requires a Mistral AI API key. + +1. Get your own Mistral API Key: +2. Set your Mistral API Key as an environment variable. You only need to do this once. + +```bash +# set Mistral API Key (using zsh for example) +$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv + +# reload the environment (or just quit and open a new terminal) +$ source ~/.zshenv +``` + +### Using poetry run + +```bash +cd examples +poetry run python chat_no_streaming.py +``` + +### Using poetry shell + +```bash +poetry shell +cd examples + +>> python chat_no_streaming.py +``` diff --git a/README.md b/README.md index 73ad3185..dd98b5cc 100644 --- a/README.md +++ b/README.md @@ -1,47 +1,1007 @@ -This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) - # Mistral Python Client -You can use the Mistral Python client to interact with the Mistral AI API. +> [!IMPORTANT] +> **Looking for v1 documentation?** If you installed `mistralai` from PyPI (e.g., `pip install mistralai`), you are using **v1** of the SDK. The documentation on this branch (`main`) is for **v2**, which is not yet released on PyPI. +> +> **➡️ [Go to the v1 branch for v1 documentation](https://github.com/mistralai/client-python/tree/v1)** + +## Migration warning + +This documentation is for Mistral AI SDK v2. You can find more details on how to migrate from v1 to v2 [here](MIGRATION.md) + +## API Key Setup + +Before you begin, you will need a Mistral AI API key. + +1. Get your own Mistral API Key: +2. 
Set your Mistral API Key as an environment variable. You only need to do this once. + +```bash +# set Mistral API Key (using zsh for example) +$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv + +# reload the environment (or just quit and open a new terminal) +$ source ~/.zshenv +``` + + +## Summary + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + + + +## Table of Contents + +* [Mistral Python Client](#mistral-python-client) + * [Migration warning](#migration-warning) + * [API Key Setup](#api-key-setup) + * [SDK Installation](#sdk-installation) + * [SDK Example Usage](#sdk-example-usage) + * [Providers' SDKs Example Usage](#providers-sdks-example-usage) + * [Available Resources and Operations](#available-resources-and-operations) + * [Server-sent event streaming](#server-sent-event-streaming) + * [File uploads](#file-uploads) + * [Retries](#retries) + * [Error Handling](#error-handling) + * [Server Selection](#server-selection) + * [Custom HTTP Client](#custom-http-client) + * [Authentication](#authentication) + * [Resource Management](#resource-management) + * [Debugging](#debugging) + * [IDE Support](#ide-support) +* [Development](#development) + * [Contributions](#contributions) + + + + +## SDK Installation + +> [!NOTE] +> **Python version upgrade policy** +> +> Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. + +The SDK can be installed with *uv*, *pip*, or *poetry* package managers. 
+ +### uv + +*uv* is a fast Python package installer and resolver, designed as a drop-in replacement for pip and pip-tools. It's recommended for its speed and modern Python tooling capabilities. -## Installing +```bash +uv add mistralai +``` + +### PIP + +*PIP* is the default package installer for Python, enabling easy installation and management of packages from PyPI via the command line. ```bash pip install mistralai ``` -### From Source +### Poetry + +*Poetry* is a modern tool that simplifies dependency management and package publishing by using a single `pyproject.toml` file to handle project metadata and dependencies. + +```bash +poetry add mistralai +``` + +### Shell and script usage with `uv` + +You can use this SDK in a Python shell with [uv](https://docs.astral.sh/uv/) and the `uvx` command that comes with it like so: + +```shell +uvx --from mistralai python +``` + +It's also possible to write a standalone Python script without needing to set up a whole project like so: + +```python +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "mistralai", +# ] +# /// + +from mistralai.client import Mistral + +sdk = Mistral( + # SDK arguments +) + +# Rest of script here... +``` + +Once that is saved to a file, you can run it with `uv run script.py` where +`script.py` can be replaced with the actual file name. + -This client uses `poetry` as a dependency and virtual environment manager. +### Agents extra dependencies -You can install poetry with +When using the agents related feature it is required to add the `agents` extra dependencies. This can be added when +installing the package: ```bash -pip install poetry +pip install "mistralai[agents]" ``` -`poetry` will set up a virtual environment and install dependencies with the following command: +> Note: These features require Python 3.10+ (the SDK minimum). + +### Additional packages + +Additional `mistralai-*` packages (e.g. 
`mistralai-workflows`) can be installed separately and are available under the `mistralai` namespace: ```bash -poetry install +pip install mistralai-workflows +``` + + +## SDK Example Usage + +### Create Chat Completions + +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.chat.complete(model="mistral-large-latest", messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) + +asyncio.run(main()) +``` + +### Upload a file + +This example shows how to upload a file. + +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.upload(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.files.upload_async(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + # Handle response + print(res) + +asyncio.run(main()) ``` -## Run examples +### Create Agents Completions -You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. +This example shows how to create agents completions. -### Using poetry run +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.agents.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], agent_id="", stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.agents.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], agent_id="", stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) + +asyncio.run(main()) +``` + +### Create Embedding Request + +This example shows how to create embedding request. + +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.embeddings.create(model="mistral-embed", inputs=[ + "Embed this sentence.", + "As well as this one.", + ]) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ + "Embed this sentence.", + "As well as this one.", + ]) + + # Handle response + print(res) + +asyncio.run(main()) +``` + + + +### More examples + +You can run the examples in the `examples/` directory using `uv run`. + + +## Providers' SDKs Example Usage + +### Azure AI + +**Prerequisites** + +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). + +**Step 1: Install** ```bash -cd examples -poetry run python chat_no_streaming.py +pip install mistralai +``` + +**Step 2: Example Usage** + +Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/azure). + +```python +import os +from mistralai.azure.client import MistralAzure + +# The SDK automatically injects api-version as a query parameter +client = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version="2024-05-01-preview", # Optional, this is the default +) + +res = client.chat.complete( + model=os.environ["AZURE_MODEL"], + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) ``` -### Using poetry shell +### Google Cloud + + +**Prerequisites** + +Before you begin, you will need to create a Google Cloud project and enable the Mistral API. 
To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/). + +To run this locally you will also need to ensure you are authenticated with Google Cloud. You can do this by running ```bash -poetry shell -cd examples +gcloud auth application-default login +``` ->> python chat_no_streaming.py +**Step 1: Install** + +```bash +pip install mistralai +# For GCP authentication support (required): +pip install "mistralai[gcp]" ``` + +**Step 2: Example Usage** + +Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/gcp). + +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +client = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region="us-central1", # Default: europe-west4 +) + +res = client.chat.complete( + model="mistral-small-2503", + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) +``` + + + +## Available Resources and Operations + +
+Available methods + +### [Agents](docs/sdks/agents/README.md) + +* [complete](docs/sdks/agents/README.md#complete) - Agents Completion +* [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion + +### [Audio.Transcriptions](docs/sdks/transcriptions/README.md) + +* [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription +* [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) + +### [Batch.Jobs](docs/sdks/batchjobs/README.md) + +* [list](docs/sdks/batchjobs/README.md#list) - Get Batch Jobs +* [create](docs/sdks/batchjobs/README.md#create) - Create Batch Job +* [get](docs/sdks/batchjobs/README.md#get) - Get Batch Job +* [cancel](docs/sdks/batchjobs/README.md#cancel) - Cancel Batch Job + +### [Beta.Agents](docs/sdks/betaagents/README.md) + +* [create](docs/sdks/betaagents/README.md#create) - Create a agent that can be used within a conversation. +* [list](docs/sdks/betaagents/README.md#list) - List agent entities. +* [get](docs/sdks/betaagents/README.md#get) - Retrieve an agent entity. +* [update](docs/sdks/betaagents/README.md#update) - Update an agent entity. +* [delete](docs/sdks/betaagents/README.md#delete) - Delete an agent entity. +* [update_version](docs/sdks/betaagents/README.md#update_version) - Update an agent version. +* [list_versions](docs/sdks/betaagents/README.md#list_versions) - List all versions of an agent. +* [get_version](docs/sdks/betaagents/README.md#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](docs/sdks/betaagents/README.md#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](docs/sdks/betaagents/README.md#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](docs/sdks/betaagents/README.md#delete_version_alias) - Delete an agent version alias. 
+ +### [Beta.Conversations](docs/sdks/conversations/README.md) + +* [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. +* [list](docs/sdks/conversations/README.md#list) - List all created conversations. +* [get](docs/sdks/conversations/README.md#get) - Retrieve a conversation information. +* [delete](docs/sdks/conversations/README.md#delete) - Delete a conversation. +* [append](docs/sdks/conversations/README.md#append) - Append new entries to an existing conversation. +* [get_history](docs/sdks/conversations/README.md#get_history) - Retrieve all entries in a conversation. +* [get_messages](docs/sdks/conversations/README.md#get_messages) - Retrieve all messages in a conversation. +* [restart](docs/sdks/conversations/README.md#restart) - Restart a conversation starting from a given entry. +* [start_stream](docs/sdks/conversations/README.md#start_stream) - Create a conversation and append entries to it. +* [append_stream](docs/sdks/conversations/README.md#append_stream) - Append new entries to an existing conversation. +* [restart_stream](docs/sdks/conversations/README.md#restart_stream) - Restart a conversation starting from a given entry. + +### [Beta.Libraries](docs/sdks/libraries/README.md) + +* [list](docs/sdks/libraries/README.md#list) - List all libraries you have access to. +* [create](docs/sdks/libraries/README.md#create) - Create a new Library. +* [get](docs/sdks/libraries/README.md#get) - Detailed information about a specific Library. +* [delete](docs/sdks/libraries/README.md#delete) - Delete a library and all of it's document. +* [update](docs/sdks/libraries/README.md#update) - Update a library. + +#### [Beta.Libraries.Accesses](docs/sdks/accesses/README.md) + +* [list](docs/sdks/accesses/README.md#list) - List all of the access to this library. +* [update_or_create](docs/sdks/accesses/README.md#update_or_create) - Create or update an access level. 
+* [delete](docs/sdks/accesses/README.md#delete) - Delete an access level. + +#### [Beta.Libraries.Documents](docs/sdks/documents/README.md) + +* [list](docs/sdks/documents/README.md#list) - List documents in a given library. +* [upload](docs/sdks/documents/README.md#upload) - Upload a new document. +* [get](docs/sdks/documents/README.md#get) - Retrieve the metadata of a specific document. +* [update](docs/sdks/documents/README.md#update) - Update the metadata of a specific document. +* [delete](docs/sdks/documents/README.md#delete) - Delete a document. +* [text_content](docs/sdks/documents/README.md#text_content) - Retrieve the text content of a specific document. +* [status](docs/sdks/documents/README.md#status) - Retrieve the processing status of a specific document. +* [get_signed_url](docs/sdks/documents/README.md#get_signed_url) - Retrieve the signed URL of a specific document. +* [extracted_text_signed_url](docs/sdks/documents/README.md#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. +* [reprocess](docs/sdks/documents/README.md#reprocess) - Reprocess a document. 
+ +### [Chat](docs/sdks/chat/README.md) + +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion + +### [Classifiers](docs/sdks/classifiers/README.md) + +* [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations +* [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations +* [classify](docs/sdks/classifiers/README.md#classify) - Classifications +* [classify_chat](docs/sdks/classifiers/README.md#classify_chat) - Chat Classifications + +### [Embeddings](docs/sdks/embeddings/README.md) + +* [create](docs/sdks/embeddings/README.md#create) - Embeddings + +### [Files](docs/sdks/files/README.md) + +* [upload](docs/sdks/files/README.md#upload) - Upload File +* [list](docs/sdks/files/README.md#list) - List Files +* [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File +* [delete](docs/sdks/files/README.md#delete) - Delete File +* [download](docs/sdks/files/README.md#download) - Download File +* [get_signed_url](docs/sdks/files/README.md#get_signed_url) - Get Signed Url + +### [Fim](docs/sdks/fim/README.md) + +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion +* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion + +### [FineTuning.Jobs](docs/sdks/finetuningjobs/README.md) + +* [list](docs/sdks/finetuningjobs/README.md#list) - Get Fine Tuning Jobs +* [create](docs/sdks/finetuningjobs/README.md#create) - Create Fine Tuning Job +* [get](docs/sdks/finetuningjobs/README.md#get) - Get Fine Tuning Job +* [cancel](docs/sdks/finetuningjobs/README.md#cancel) - Cancel Fine Tuning Job +* [start](docs/sdks/finetuningjobs/README.md#start) - Start Fine Tuning Job + +### [Models](docs/sdks/models/README.md) + +* [list](docs/sdks/models/README.md#list) - List Models +* [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model +* [delete](docs/sdks/models/README.md#delete) - Delete Model +* [update](docs/sdks/models/README.md#update) 
- Update Fine Tuned Model +* [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model +* [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model + +### [Ocr](docs/sdks/ocr/README.md) + +* [process](docs/sdks/ocr/README.md#process) - OCR + +
+ + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. + +The stream is also a [Context Manager][context-manager] and can be used with the `with` statement and will close the +underlying connection when the context is exited. + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://book.pythontips.com/en/latest/generators.html +[context-manager]: https://book.pythontips.com/en/latest/context_managers.html + + + +## File uploads + +Certain SDK methods accept file objects as part of a request body or multi-part request. It is possible and typically recommended to upload files as a stream rather than reading the entire contents into memory. This avoids excessive memory consumption and potentially crashing with out-of-memory errors when working with very large files. The following example demonstrates how to attach a file stream to a request. + +> [!TIP] +> +> For endpoints that handle file uploads bytes arrays can also be used. However, using streams is recommended for large files. 
+> + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + # Handle response + print(res) + +``` + + + +## Retries + +Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. + +To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: +```python +from mistralai.client import Mistral +from mistralai.client.utils import BackoffStrategy, RetryConfig +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.list(, + RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) + + # Handle response + print(res) + +``` + +If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: +```python +from mistralai.client import Mistral +from mistralai.client.utils import BackoffStrategy, RetryConfig +import os + + +with Mistral( + retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.list() + + # Handle response + print(res) + +``` + + + +## Error Handling + +[`MistralError`](./src/mistralai/client/errors/mistralerror.py) is the base class for all HTTP error responses. 
It has the following properties: + +| Property | Type | Description | +| ------------------ | ---------------- | --------------------------------------------------------------------------------------- | +| `err.message` | `str` | Error message | +| `err.status_code` | `int` | HTTP response status code eg `404` | +| `err.headers` | `httpx.Headers` | HTTP response headers | +| `err.body` | `str` | HTTP body. Can be empty string if no body is returned. | +| `err.raw_response` | `httpx.Response` | Raw HTTP response | +| `err.data` | | Optional. Some errors may contain structured data. [See Error Classes](#error-classes). | + +### Example +```python +from mistralai.client import Mistral, errors +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + res = None + try: + + res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + # Handle response + print(res) + + + except errors.MistralError as e: + # The base class for HTTP error responses + print(e.message) + print(e.status_code) + print(e.body) + print(e.headers) + print(e.raw_response) + + # Depending on the method different errors may be thrown + if isinstance(e, errors.HTTPValidationError): + print(e.data.detail) # Optional[List[models.ValidationError]] +``` + +### Error Classes +**Primary error:** +* [`MistralError`](./src/mistralai/client/errors/mistralerror.py): The base class for HTTP error responses. + +
Less common errors (6) + +
+ +**Network errors:** +* [`httpx.RequestError`](https://www.python-httpx.org/exceptions/#httpx.RequestError): Base class for request errors. + * [`httpx.ConnectError`](https://www.python-httpx.org/exceptions/#httpx.ConnectError): HTTP client was unable to make a request to a server. + * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. + + +**Inherit from [`MistralError`](./src/mistralai/client/errors/mistralerror.py)**: +* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* +* [`ResponseValidationError`](./src/mistralai/client/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. + +
+ +\* Check [the method documentation](#available-resources-and-operations) to see if the error is applicable. + + + +## Server Selection + +### Select Server by Name + +You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: + +| Name | Server | Description | +| ---- | ------------------------ | -------------------- | +| `eu` | `https://api.mistral.ai` | EU Production server | + +#### Example + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + server="eu", + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.list() + + # Handle response + print(res) + +``` + +### Override Server URL Per-Client + +The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: +```python +from mistralai.client import Mistral +import os + + +with Mistral( + server_url="https://api.mistral.ai", + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.list() + + # Handle response + print(res) + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. 
+Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocols ensuring that the client has the necessary methods to make API calls.
+This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly.
+
+For example, you could specify a header for every request that this SDK makes as follows:
+```python
+from mistralai.client import Mistral
+import httpx
+
+http_client = httpx.Client(headers={"x-custom-header": "someValue"})
+s = Mistral(client=http_client)
+```
+
+or you could wrap the client with your own custom logic:
+```python
+from typing import Any, Optional, Union
+
+from mistralai.client import Mistral
+from mistralai.client.httpclient import AsyncHttpClient
+import httpx
+
+class CustomClient(AsyncHttpClient):
+    client: AsyncHttpClient
+
+    def __init__(self, client: AsyncHttpClient):
+        self.client = client
+
+    async def send(
+        self,
+        request: httpx.Request,
+        *,
+        stream: bool = False,
+        auth: Union[
+            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
+        ] = httpx.USE_CLIENT_DEFAULT,
+        follow_redirects: Union[
+            bool, httpx._client.UseClientDefault
+        ] = httpx.USE_CLIENT_DEFAULT,
+    ) -> httpx.Response:
+        request.headers["Client-Level-Header"] = "added by client"
+
+        return await self.client.send(
+            request, stream=stream, auth=auth, follow_redirects=follow_redirects
+        )
+
+    def build_request(
+        self,
+        method: str,
+        url: httpx._types.URLTypes,
+        *,
+        content: Optional[httpx._types.RequestContent] = None,
+        data: Optional[httpx._types.RequestData] = None,
+        files: Optional[httpx._types.RequestFiles] = None,
+        json: Optional[Any] = None,
+        params: Optional[httpx._types.QueryParamTypes] = None,
+        headers: Optional[httpx._types.HeaderTypes] = None,
+        cookies: Optional[httpx._types.CookieTypes] = None,
+        timeout: Union[
httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = Mistral(async_client=CustomClient(httpx.AsyncClient())) +``` + + + +## Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | Environment Variable | +| --------- | ---- | ----------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | + +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.list() + + # Handle response + print(res) + +``` + + + +## Resource Management + +The `Mistral` class implements the context manager protocol and registers a finalizer function to close the underlying sync and async HTTPX clients it uses under the hood. This will close HTTP connections, release memory and free up other resources held by the SDK. In short-lived Python programs and notebooks that make a few SDK method calls, resource management may not be a concern. However, in longer-lived programs, it is beneficial to create a single SDK instance via a [context manager][context-manager] and reuse it across the application. + +[context-manager]: https://docs.python.org/3/reference/datamodel.html#context-managers + +```python +from mistralai.client import Mistral +import os +def main(): + + with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + # Rest of application here... 
+ + +# Or when using async: +async def amain(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + # Rest of application here... +``` + + + +## Debugging + +You can setup your SDK to emit debug logs for SDK requests and responses. + +You can pass your own logger class directly into your SDK. +```python +from mistralai.client import Mistral +import logging + +logging.basicConfig(level=logging.DEBUG) +s = Mistral(debug_logger=logging.getLogger("mistralai.client")) +``` + +You can also enable a default debug logger by setting an environment variable `MISTRAL_DEBUG` to true. + + + +## IDE Support + +### PyCharm + +Generally, the SDK will work well with most IDEs out of the box. However, when using PyCharm, you can enjoy much better integration with Pydantic by installing an additional plugin. + +- [PyCharm Pydantic Plugin](https://docs.pydantic.dev/latest/integrations/pycharm/) + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/RELEASES.md b/RELEASES.md new file mode 100644 index 00000000..1a631692 --- /dev/null +++ b/RELEASES.md @@ -0,0 +1,391 @@ + + +## 2024-08-07 14:25:13 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.356.0 (2.388.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.0] . +### Releases +- [PyPI v1.0.0] https://pypi.org/project/mistralai/1.0.0 - . 
+ +## 2024-08-08 18:12:16 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.357.4 (2.390.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.1] . +### Releases +- [PyPI v1.0.1] https://pypi.org/project/mistralai/1.0.1 - . + +## 2024-08-20 08:36:28 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.376.0 (2.402.5) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.2] . +### Releases +- [PyPI v1.0.2] https://pypi.org/project/mistralai/1.0.2 - . + +## 2024-08-29 09:09:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.382.0 (2.404.11) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.3] . +### Releases +- [PyPI v1.0.3] https://pypi.org/project/mistralai/1.0.3 - . + +## 2024-09-13 16:21:24 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.396.7 (2.415.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.1.0] . +### Releases +- [PyPI v1.1.0] https://pypi.org/project/mistralai/1.1.0 - . + +## 2024-11-07 19:52:56 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.434.3 (2.452.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.0] . +### Releases +- [PyPI v1.2.0] https://pypi.org/project/mistralai/1.2.0 - . + +## 2024-11-08 13:41:24 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.434.7 (2.452.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.1] . 
+### Releases +- [PyPI v1.2.1] https://pypi.org/project/mistralai/1.2.1 - . + +## 2024-11-12 18:04:16 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.438.1 (2.457.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.2] . +### Releases +- [PyPI v1.2.2] https://pypi.org/project/mistralai/1.2.2 - . + +## 2024-11-15 18:37:23 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.440.1 (2.460.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.3] . +### Releases +- [PyPI v1.2.3] https://pypi.org/project/mistralai/1.2.3 - . + +## 2024-12-02 14:25:56 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.440.1 (2.460.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.4] . +### Releases +- [PyPI v1.2.4] https://pypi.org/project/mistralai/1.2.4 - . + +## 2024-12-04 15:14:08 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.451.1 (2.470.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.5] . +### Releases +- [PyPI v1.2.5] https://pypi.org/project/mistralai/1.2.5 - . + +## 2025-01-06 09:57:47 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.462.2 (2.486.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.6] . +### Releases +- [PyPI v1.2.6] https://pypi.org/project/mistralai/1.2.6 - . 
+ +## 2025-01-14 09:35:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.468.5 (2.493.11) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.3.0] . +### Releases +- [PyPI v1.3.0] https://pypi.org/project/mistralai/1.3.0 - . + +## 2025-01-15 10:44:07 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.462.2 (2.486.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.3.1] . +### Releases +- [PyPI v1.3.1] https://pypi.org/project/mistralai/1.3.1 - . + +## 2025-01-21 11:09:53 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.469.11 (2.493.32) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.4.0] . +### Releases +- [PyPI v1.4.0] https://pypi.org/project/mistralai/1.4.0 - . + +## 2025-01-27 13:57:39 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.476.2 (2.495.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.0] . +### Releases +- [PyPI v1.5.0] https://pypi.org/project/mistralai/1.5.0 - . + +## 2025-03-06 16:38:57 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.477.0 (2.497.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.1] . +### Releases +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . + +## 2025-03-19 18:09:29 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.477.0 (2.497.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.2] . 
+### Releases +- [PyPI v1.5.2] https://pypi.org/project/mistralai/1.5.2 - . + +## 2025-03-20 10:34:02 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.6.0] . +### Releases +- [PyPI v1.6.0] https://pypi.org/project/mistralai/1.6.0 - . + +## 2025-04-16 18:35:19 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] . +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . + +## 2025-05-22 15:03:08 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.1] . +### Releases +- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . + +## 2025-05-26 11:05:08 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.0] . +### Releases +- [PyPI v1.8.0] https://pypi.org/project/mistralai/1.8.0 - . + +## 2025-05-28 15:38:22 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.1] . +### Releases +- [PyPI v1.8.1] https://pypi.org/project/mistralai/1.8.1 - . 
+ +## 2025-06-10 16:42:28 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.2] . +### Releases +- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . + +## 2025-06-30 17:56:20 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.1] . +### Releases +- [PyPI v1.9.1] https://pypi.org/project/mistralai/1.9.1 - . + +## 2025-07-10 12:22:52 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.2] . +### Releases +- [PyPI v1.9.2] https://pypi.org/project/mistralai/1.9.2 - . + +## 2025-07-23 17:06:32 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.3] . +### Releases +- [PyPI v1.9.3] https://pypi.org/project/mistralai/1.9.3 - . + +## 2025-08-13 07:21:11 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.6] . +### Releases +- [PyPI v1.9.6] https://pypi.org/project/mistralai/1.9.6 - . + +## 2025-08-20 08:28:00 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.7] . 
+### Releases +- [PyPI v1.9.7] https://pypi.org/project/mistralai/1.9.7 - . + +## 2025-08-25 14:54:06 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.8] . +### Releases +- [PyPI v1.9.8] https://pypi.org/project/mistralai/1.9.8 - . + +## 2025-08-26 17:34:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.9] . +### Releases +- [PyPI v1.9.9] https://pypi.org/project/mistralai/1.9.9 - . + +## 2025-09-02 07:02:26 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.10] . +### Releases +- [PyPI v1.9.10] https://pypi.org/project/mistralai/1.9.10 - . + +## 2025-10-02 15:48:02 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.11] . +### Releases +- [PyPI v1.9.11] https://pypi.org/project/mistralai/1.9.11 - . + +## 2025-12-16 19:44:09 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.10.0] . +### Releases +- [PyPI v1.10.0] https://pypi.org/project/mistralai/1.10.0 - . 
+ +## 2026-01-15 18:39:22 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.10.1] . +### Releases +- [PyPI v1.10.1] https://pypi.org/project/mistralai/1.10.1 - . + +## 2026-01-22 11:16:25 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.685.0 (2.794.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.11.1] . +### Releases +- [PyPI v1.11.1] https://pypi.org/project/mistralai/1.11.1 - . + +## 2026-02-01 21:20:42 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.685.0 (2.794.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.12.0] . +### Releases +- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . + +## 2026-02-25 17:32:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] . +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . + +## 2026-02-27 15:12:59 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0rc1] . +### Releases +- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai/2.0.0rc1 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md new file mode 100644 index 00000000..f71bbabc --- /dev/null +++ b/USAGE.md @@ -0,0 +1,217 @@ + +### Create Chat Completions + +This example shows how to create chat completions. 
+ +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.chat.complete(model="mistral-large-latest", messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) + +asyncio.run(main()) +``` + +### Upload a file + +This example shows how to upload a file. + +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.upload(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.files.upload_async(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + # Handle response + print(res) + +asyncio.run(main()) +``` + +### Create Agents Completions + +This example shows how to create agents completions. + +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.agents.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], agent_id="", stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.agents.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], agent_id="", stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) + +asyncio.run(main()) +``` + +### Create Embedding Request + +This example shows how to create embedding request. + +```python +# Synchronous Example +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.embeddings.create(model="mistral-embed", inputs=[ + "Embed this sentence.", + "As well as this one.", + ]) + + # Handle response + print(res) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. + +```python +# Asynchronous Example +import asyncio +from mistralai.client import Mistral +import os + +async def main(): + + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ + "Embed this sentence.", + "As well as this one.", + ]) + + # Handle response + print(res) + +asyncio.run(main()) +``` + \ No newline at end of file diff --git a/docs/errors/httpvalidationerror.md b/docs/errors/httpvalidationerror.md new file mode 100644 index 00000000..712a148c --- /dev/null +++ b/docs/errors/httpvalidationerror.md @@ -0,0 +1,8 @@ +# HTTPValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agent.md b/docs/models/agent.md new file mode 100644 index 00000000..4de5a901 --- /dev/null +++ b/docs/models/agent.md @@ -0,0 +1,24 @@ +# Agent + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. 
| +| `tools` | List[[models.AgentTool](../models/agenttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["agent"]]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `versions` | List[*int*] | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deployment_chat` | *bool* | :heavy_check_mark: | N/A | +| `source` | *str* | :heavy_check_mark: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentaliasresponse.md b/docs/models/agentaliasresponse.md new file mode 100644 index 00000000..aa531ec5 --- /dev/null +++ b/docs/models/agentaliasresponse.md @@ -0,0 +1,11 @@ +# AgentAliasResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | 
:heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md new file mode 100644 index 00000000..451f6fb8 --- /dev/null +++ b/docs/models/agentconversation.md @@ -0,0 +1,16 @@ +# AgentConversation + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentConversationAgentVersion]](../models/agentconversationagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversationagentversion.md b/docs/models/agentconversationagentversion.md new file mode 100644 index 00000000..668a8dc0 --- /dev/null +++ b/docs/models/agentconversationagentversion.md @@ -0,0 +1,17 @@ +# AgentConversationAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/agenthandoffdoneevent.md b/docs/models/agenthandoffdoneevent.md new file mode 100644 index 00000000..6bfcc3d8 --- /dev/null +++ b/docs/models/agenthandoffdoneevent.md @@ -0,0 +1,13 @@ +# AgentHandoffDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | 
:heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentry.md b/docs/models/agenthandoffentry.md new file mode 100644 index 00000000..2b689ec7 --- /dev/null +++ b/docs/models/agenthandoffentry.md @@ -0,0 +1,16 @@ +# AgentHandoffEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["agent.handoff"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedevent.md b/docs/models/agenthandoffstartedevent.md new file mode 100644 index 00000000..518b5a0c --- /dev/null +++ b/docs/models/agenthandoffstartedevent.md @@ -0,0 +1,13 @@ +# AgentHandoffStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md new file mode 100644 index 00000000..79406434 --- /dev/null +++ b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1AgentsCreateOrUpdateAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeletealiasrequest.md b/docs/models/agentsapiv1agentsdeletealiasrequest.md new file mode 100644 index 00000000..8e95c0c3 --- /dev/null +++ b/docs/models/agentsapiv1agentsdeletealiasrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsDeleteAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeleterequest.md b/docs/models/agentsapiv1agentsdeleterequest.md new file mode 100644 index 00000000..2799f418 
--- /dev/null +++ b/docs/models/agentsapiv1agentsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetagentversion.md b/docs/models/agentsapiv1agentsgetagentversion.md new file mode 100644 index 00000000..7fb9f2d5 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetagentversion.md @@ -0,0 +1,17 @@ +# AgentsAPIV1AgentsGetAgentVersion + + +## Supported Types + +### `int` + +```python +value: int = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md new file mode 100644 index 00000000..ceffe009 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetversionrequest.md b/docs/models/agentsapiv1agentsgetversionrequest.md new file mode 100644 index 00000000..96a73589 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetversionrequest.md @@ 
-0,0 +1,9 @@ +# AgentsAPIV1AgentsGetVersionRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/agentsapiv1agentslistrequest.md new file mode 100644 index 00000000..4785a54c --- /dev/null +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -0,0 +1,15 @@ +# AgentsAPIV1AgentsListRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `sources` | List[[models.RequestSource](../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by agent name | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionaliasesrequest.md b/docs/models/agentsapiv1agentslistversionaliasesrequest.md new file mode 100644 index 00000000..3083bf92 --- /dev/null +++ b/docs/models/agentsapiv1agentslistversionaliasesrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsListVersionAliasesRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | 
------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionsrequest.md b/docs/models/agentsapiv1agentslistversionsrequest.md new file mode 100644 index 00000000..91831700 --- /dev/null +++ b/docs/models/agentsapiv1agentslistversionsrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1AgentsListVersionsRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of versions per page | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/agentsapiv1agentsupdaterequest.md new file mode 100644 index 00000000..7ef60bec --- /dev/null +++ b/docs/models/agentsapiv1agentsupdaterequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `update_agent_request` | [models.UpdateAgentRequest](../models/updateagentrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsupdateversionrequest.md b/docs/models/agentsapiv1agentsupdateversionrequest.md new file mode 100644 index 00000000..e937acc9 --- /dev/null +++ b/docs/models/agentsapiv1agentsupdateversionrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateVersionRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ 
| ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendrequest.md b/docs/models/agentsapiv1conversationsappendrequest.md new file mode 100644 index 00000000..ac8a00ec --- /dev/null +++ b/docs/models/agentsapiv1conversationsappendrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsAppendRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `conversation_append_request` | [models.ConversationAppendRequest](../models/conversationappendrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendstreamrequest.md b/docs/models/agentsapiv1conversationsappendstreamrequest.md new file mode 100644 index 00000000..dbc330f1 --- /dev/null +++ b/docs/models/agentsapiv1conversationsappendstreamrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsAppendStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. 
| +| `conversation_append_stream_request` | [models.ConversationAppendStreamRequest](../models/conversationappendstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsdeleterequest.md b/docs/models/agentsapiv1conversationsdeleterequest.md new file mode 100644 index 00000000..c6eed281 --- /dev/null +++ b/docs/models/agentsapiv1conversationsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md new file mode 100644 index 00000000..67d450c8 --- /dev/null +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md new file mode 100644 index 00000000..7e5d39e9 --- /dev/null +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsHistoryRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/agentsapiv1conversationslistrequest.md new file mode 100644 index 00000000..62c9011f --- /dev/null +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1ConversationsListRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistresponse.md b/docs/models/agentsapiv1conversationslistresponse.md new file mode 100644 index 00000000..b233ee20 --- /dev/null +++ b/docs/models/agentsapiv1conversationslistresponse.md @@ -0,0 +1,17 @@ +# AgentsAPIV1ConversationsListResponse + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff --git 
a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md new file mode 100644 index 00000000..a91ab046 --- /dev/null +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsMessagesRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md new file mode 100644 index 00000000..a18a41f5 --- /dev/null +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsRestartRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. 
| +| `conversation_restart_request` | [models.ConversationRestartRequest](../models/conversationrestartrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md b/docs/models/agentsapiv1conversationsrestartstreamrequest.md new file mode 100644 index 00000000..7548286a --- /dev/null +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsRestartStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. 
| +| `conversation_restart_stream_request` | [models.ConversationRestartStreamRequest](../models/conversationrestartstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md new file mode 100644 index 00000000..33435732 --- /dev/null +++ b/docs/models/agentscompletionrequest.md @@ -0,0 +1,23 @@ +# AgentsCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionRequestMessage](../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionrequestmessage.md b/docs/models/agentscompletionrequestmessage.md new file mode 100644 index 00000000..957703b5 --- /dev/null +++ b/docs/models/agentscompletionrequestmessage.md @@ -0,0 +1,29 @@ +# AgentsCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequeststop.md b/docs/models/agentscompletionrequeststop.md new file mode 100644 index 00000000..21ce6fb5 --- /dev/null +++ b/docs/models/agentscompletionrequeststop.md @@ -0,0 +1,19 @@ +# AgentsCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequesttoolchoice.md b/docs/models/agentscompletionrequesttoolchoice.md new file mode 100644 index 00000000..63b9dca9 --- /dev/null +++ b/docs/models/agentscompletionrequesttoolchoice.md @@ -0,0 +1,17 @@ +# AgentsCompletionRequestToolChoice + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md new file mode 100644 index 00000000..407be8e0 --- /dev/null +++ b/docs/models/agentscompletionstreamrequest.md @@ -0,0 +1,23 @@ +# AgentsCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | 
:heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequestmessage.md b/docs/models/agentscompletionstreamrequestmessage.md new file mode 100644 index 00000000..6ccf4244 --- /dev/null +++ b/docs/models/agentscompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# AgentsCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/agentscompletionstreamrequeststop.md b/docs/models/agentscompletionstreamrequeststop.md new file mode 100644 index 00000000..981005f3 --- /dev/null +++ b/docs/models/agentscompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# AgentsCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/agentscompletionstreamrequesttoolchoice.md b/docs/models/agentscompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..4354523a --- /dev/null +++ b/docs/models/agentscompletionstreamrequesttoolchoice.md @@ -0,0 +1,17 @@ +# AgentsCompletionStreamRequestToolChoice + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/docs/models/agenttool.md b/docs/models/agenttool.md new file mode 100644 index 00000000..022f7e10 --- /dev/null +++ b/docs/models/agenttool.md @@ -0,0 +1,41 @@ +# AgentTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/apiendpoint.md b/docs/models/apiendpoint.md new file mode 100644 index 00000000..8d83a26f --- /dev/null +++ b/docs/models/apiendpoint.md @@ -0,0 +1,17 @@ +# APIEndpoint + + +## Values + +| Name | Value | +| ------------------------------ | ------------------------------ | +| `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | +| `ROOT_V1_EMBEDDINGS` | /v1/embeddings | +| 
`ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | +| `ROOT_V1_MODERATIONS` | /v1/moderations | +| `ROOT_V1_CHAT_MODERATIONS` | /v1/chat/moderations | +| `ROOT_V1_OCR` | /v1/ocr | +| `ROOT_V1_CLASSIFICATIONS` | /v1/classifications | +| `ROOT_V1_CHAT_CLASSIFICATIONS` | /v1/chat/classifications | +| `ROOT_V1_CONVERSATIONS` | /v1/conversations | +| `ROOT_V1_AUDIO_TRANSCRIPTIONS` | /v1/audio/transcriptions | \ No newline at end of file diff --git a/docs/models/archivemodelresponse.md b/docs/models/archivemodelresponse.md new file mode 100644 index 00000000..276656d1 --- /dev/null +++ b/docs/models/archivemodelresponse.md @@ -0,0 +1,10 @@ +# ArchiveModelResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/arguments.md b/docs/models/arguments.md new file mode 100644 index 00000000..2e54e27e --- /dev/null +++ b/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md new file mode 100644 index 00000000..9ef63837 --- /dev/null +++ b/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/docs/models/assistantmessagecontent.md b/docs/models/assistantmessagecontent.md new file mode 100644 index 00000000..047b7cf9 --- /dev/null +++ b/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/attributes.md b/docs/models/attributes.md new file mode 100644 index 00000000..147708d9 --- /dev/null +++ b/docs/models/attributes.md @@ -0,0 +1,59 @@ +# Attributes + + +## Supported Types + +### `bool` + +```python +value: bool = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + +### `float` + +```python +value: float = /* values here */ +``` + +### `datetime` + +```python +value: datetime = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + +### `List[int]` + +```python +value: List[int] = /* values here */ +``` + +### `List[float]` + +```python +value: List[float] = /* values here */ +``` + +### `List[bool]` + +```python +value: List[bool] = /* values here */ +``` + diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md new file mode 100644 index 00000000..1ba8b0f5 --- /dev/null +++ b/docs/models/audiochunk.md @@ -0,0 +1,9 @@ +# AudioChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------ | ------------------------ | ------------------------ | ------------------------ | +| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | +| `input_audio` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audioencoding.md b/docs/models/audioencoding.md new file mode 100644 index 00000000..feec8c71 --- /dev/null +++ b/docs/models/audioencoding.md @@ 
-0,0 +1,13 @@ +# AudioEncoding + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `PCM_S16LE` | pcm_s16le | +| `PCM_S32LE` | pcm_s32le | +| `PCM_F16LE` | pcm_f16le | +| `PCM_F32LE` | pcm_f32le | +| `PCM_MULAW` | pcm_mulaw | +| `PCM_ALAW` | pcm_alaw | \ No newline at end of file diff --git a/docs/models/audioformat.md b/docs/models/audioformat.md new file mode 100644 index 00000000..d174ab99 --- /dev/null +++ b/docs/models/audioformat.md @@ -0,0 +1,9 @@ +# AudioFormat + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `encoding` | [models.AudioEncoding](../models/audioencoding.md) | :heavy_check_mark: | N/A | +| `sample_rate` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md new file mode 100644 index 00000000..80bd5301 --- /dev/null +++ b/docs/models/audiotranscriptionrequest.md @@ -0,0 +1,17 @@ +# AudioTranscriptionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequeststream.md b/docs/models/audiotranscriptionrequeststream.md new file mode 100644 index 00000000..5d64964d --- /dev/null +++ b/docs/models/audiotranscriptionrequeststream.md @@ -0,0 +1,17 @@ +# AudioTranscriptionRequestStream + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | 
Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `stream` | *Optional[Literal[True]]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md new file mode 100644 index 00000000..0f42504f --- /dev/null +++ b/docs/models/basemodelcard.md @@ -0,0 +1,20 @@ +# BaseModelCard + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `default_model_temperature` | 
*OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `type` | *Literal["base"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/batcherror.md b/docs/models/batcherror.md new file mode 100644 index 00000000..95016cdc --- /dev/null +++ b/docs/models/batcherror.md @@ -0,0 +1,9 @@ +# BatchError + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `message` | *str* | :heavy_check_mark: | N/A | +| `count` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjob.md b/docs/models/batchjob.md new file mode 100644 index 00000000..162e2cff --- /dev/null +++ b/docs/models/batchjob.md @@ -0,0 +1,26 @@ +# BatchJob + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["batch"]]* | :heavy_minus_sign: | N/A | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `endpoint` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | +| `outputs` | List[Dict[str, *Any*]] | :heavy_minus_sign: | N/A | +| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `total_requests` | *int* | 
:heavy_check_mark: | N/A | +| `completed_requests` | *int* | :heavy_check_mark: | N/A | +| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | +| `failed_requests` | *int* | :heavy_check_mark: | N/A | +| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobstatus.md b/docs/models/batchjobstatus.md new file mode 100644 index 00000000..64617b31 --- /dev/null +++ b/docs/models/batchjobstatus.md @@ -0,0 +1,14 @@ +# BatchJobStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `RUNNING` | RUNNING | +| `SUCCESS` | SUCCESS | +| `FAILED` | FAILED | +| `TIMEOUT_EXCEEDED` | TIMEOUT_EXCEEDED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | +| `CANCELLED` | CANCELLED | \ No newline at end of file diff --git a/docs/models/batchrequest.md b/docs/models/batchrequest.md new file mode 100644 index 00000000..6ee3b394 --- /dev/null +++ b/docs/models/batchrequest.md @@ -0,0 +1,9 @@ +# BatchRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `custom_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `body` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/builtinconnectors.md b/docs/models/builtinconnectors.md new file mode 100644 index 00000000..f96f5044 --- /dev/null +++ b/docs/models/builtinconnectors.md @@ -0,0 +1,12 @@ +# BuiltInConnectors + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `WEB_SEARCH` | web_search | +| `WEB_SEARCH_PREMIUM` | web_search_premium | +| `CODE_INTERPRETER` | code_interpreter | +| `IMAGE_GENERATION` | image_generation | +| `DOCUMENT_LIBRARY` | document_library | \ No newline at end of file diff --git 
a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md new file mode 100644 index 00000000..ba9c95ea --- /dev/null +++ b/docs/models/chatclassificationrequest.md @@ -0,0 +1,9 @@ +# ChatClassificationRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `model` | *str* | :heavy_check_mark: | N/A | +| `input` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md new file mode 100644 index 00000000..deaa0ea0 --- /dev/null +++ b/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/docs/models/chatcompletionchoicefinishreason.md b/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 00000000..b2f15ecb --- /dev/null +++ b/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## 
Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..921161fa --- /dev/null +++ b/docs/models/chatcompletionrequest.md @@ -0,0 +1,26 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequestmessage.md b/docs/models/chatcompletionrequestmessage.md new file mode 100644 index 00000000..91e9e062 --- /dev/null +++ b/docs/models/chatcompletionrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatcompletionrequeststop.md b/docs/models/chatcompletionrequeststop.md new file mode 100644 index 00000000..749296d4 --- /dev/null +++ b/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/chatcompletionrequesttoolchoice.md b/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..dc82a8ef --- /dev/null +++ b/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestToolChoice + +Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/docs/models/chatcompletionresponse.md b/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..a0465ffb --- /dev/null +++ b/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..8761f000 --- /dev/null +++ b/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,26 
@@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequestmessage.md b/docs/models/chatcompletionstreamrequestmessage.md new file mode 100644 index 00000000..2e4e93ac --- /dev/null +++ b/docs/models/chatcompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequeststop.md b/docs/models/chatcompletionstreamrequeststop.md new file mode 100644 index 00000000..a48460a9 --- /dev/null +++ b/docs/models/chatcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequesttoolchoice.md b/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..43f3ca38 --- /dev/null +++ b/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestToolChoice + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md new file mode 100644 index 00000000..f252482d --- /dev/null +++ b/docs/models/chatmoderationrequest.md @@ -0,0 +1,9 @@ +# ChatModerationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of 
file diff --git a/docs/models/chatmoderationrequestinputs1.md b/docs/models/chatmoderationrequestinputs1.md new file mode 100644 index 00000000..e15b8a84 --- /dev/null +++ b/docs/models/chatmoderationrequestinputs1.md @@ -0,0 +1,29 @@ +# ChatModerationRequestInputs1 + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequestinputs2.md b/docs/models/chatmoderationrequestinputs2.md new file mode 100644 index 00000000..f40a4ebe --- /dev/null +++ b/docs/models/chatmoderationrequestinputs2.md @@ -0,0 +1,29 @@ +# ChatModerationRequestInputs2 + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequestinputs3.md b/docs/models/chatmoderationrequestinputs3.md new file mode 100644 index 00000000..ff1c6ea3 --- /dev/null +++ b/docs/models/chatmoderationrequestinputs3.md @@ -0,0 +1,19 @@ +# ChatModerationRequestInputs3 + +Chat to classify + + +## Supported Types + +### `List[models.ChatModerationRequestInputs1]` + +```python +value: List[models.ChatModerationRequestInputs1] = /* values here */ +``` + +### `List[List[models.ChatModerationRequestInputs2]]` + +```python +value: List[List[models.ChatModerationRequestInputs2]] = /* values here */ +``` + diff --git a/docs/models/checkpoint.md 
b/docs/models/checkpoint.md new file mode 100644 index 00000000..f7f35530 --- /dev/null +++ b/docs/models/checkpoint.md @@ -0,0 +1,10 @@ +# Checkpoint + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `metrics` | [models.Metric](../models/metric.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | +| `step_number` | *int* | :heavy_check_mark: | The step number that the checkpoint was created at. | | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the checkpoint was created. 
| 1716963433 | \ No newline at end of file diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md new file mode 100644 index 00000000..99cdc4a0 --- /dev/null +++ b/docs/models/classificationrequest.md @@ -0,0 +1,10 @@ +# ClassificationRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | \ No newline at end of file diff --git a/docs/models/classificationrequestinputs.md b/docs/models/classificationrequestinputs.md new file mode 100644 index 00000000..69d75d11 --- /dev/null +++ b/docs/models/classificationrequestinputs.md @@ -0,0 +1,19 @@ +# ClassificationRequestInputs + +Text to classify. 
+ + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/classificationresponse.md b/docs/models/classificationresponse.md new file mode 100644 index 00000000..d1633ae7 --- /dev/null +++ b/docs/models/classificationresponse.md @@ -0,0 +1,10 @@ +# ClassificationResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *str* | :heavy_check_mark: | N/A | | +| `results` | List[Dict[str, [models.ClassificationTargetResult](../models/classificationtargetresult.md)]] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/classificationtargetresult.md b/docs/models/classificationtargetresult.md new file mode 100644 index 00000000..f3b10727 --- /dev/null +++ b/docs/models/classificationtargetresult.md @@ -0,0 +1,8 @@ +# ClassificationTargetResult + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `scores` | Dict[str, *float*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetunedmodel.md b/docs/models/classifierfinetunedmodel.md new file mode 100644 index 00000000..ad05f931 --- /dev/null +++ b/docs/models/classifierfinetunedmodel.md @@ -0,0 +1,23 @@ +# ClassifierFineTunedModel + + 
+## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjob.md b/docs/models/classifierfinetuningjob.md new file mode 100644 index 00000000..369756ba --- /dev/null +++ b/docs/models/classifierfinetuningjob.md @@ -0,0 +1,23 @@ +# ClassifierFineTuningJob + + +## Fields + +| Field | Type | Required | Description | +| 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierFineTuningJobStatus](../models/classifierfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. 
| +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | An optional string that will be appended to the name of the fine-tuned model. | +| `integrations` | List[[models.ClassifierFineTuningJobIntegration](../models/classifierfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjobdetails.md b/docs/models/classifierfinetuningjobdetails.md new file mode 100644 index 00000000..c5efdf1c --- /dev/null +++ b/docs/models/classifierfinetuningjobdetails.md @@ -0,0 +1,26 @@ +# ClassifierFineTuningJobDetails + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierFineTuningJobDetailsStatus](../models/classifierfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.ClassifierFineTuningJobDetailsIntegration](../models/classifierfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A 
| +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjobdetailsintegration.md b/docs/models/classifierfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..438a35d9 --- /dev/null +++ b/docs/models/classifierfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/classifierfinetuningjobdetailsstatus.md b/docs/models/classifierfinetuningjobdetailsstatus.md new file mode 100644 index 00000000..058c6583 --- /dev/null +++ b/docs/models/classifierfinetuningjobdetailsstatus.md @@ -0,0 +1,17 @@ +# ClassifierFineTuningJobDetailsStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git 
a/docs/models/classifierfinetuningjobintegration.md b/docs/models/classifierfinetuningjobintegration.md new file mode 100644 index 00000000..820aee4c --- /dev/null +++ b/docs/models/classifierfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/classifierfinetuningjobstatus.md b/docs/models/classifierfinetuningjobstatus.md new file mode 100644 index 00000000..ca829885 --- /dev/null +++ b/docs/models/classifierfinetuningjobstatus.md @@ -0,0 +1,19 @@ +# ClassifierFineTuningJobStatus + +The current status of the fine-tuning job. + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/classifiertarget.md b/docs/models/classifiertarget.md new file mode 100644 index 00000000..f8c99e2e --- /dev/null +++ b/docs/models/classifiertarget.md @@ -0,0 +1,11 @@ +# ClassifierTarget + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | :heavy_check_mark: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `loss_function` | 
[OptionalNullable[models.FTClassifierLossFunction]](../models/ftclassifierlossfunction.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertargetresult.md b/docs/models/classifiertargetresult.md new file mode 100644 index 00000000..ccadc623 --- /dev/null +++ b/docs/models/classifiertargetresult.md @@ -0,0 +1,11 @@ +# ClassifierTargetResult + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | :heavy_check_mark: | N/A | +| `weight` | *float* | :heavy_check_mark: | N/A | +| `loss_function` | [models.FTClassifierLossFunction](../models/ftclassifierlossfunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertrainingparameters.md b/docs/models/classifiertrainingparameters.md new file mode 100644 index 00000000..3b6f3be6 --- /dev/null +++ b/docs/models/classifiertrainingparameters.md @@ -0,0 +1,13 @@ +# ClassifierTrainingParameters + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md 
b/docs/models/codeinterpretertool.md new file mode 100644 index 00000000..6302fc62 --- /dev/null +++ b/docs/models/codeinterpretertool.md @@ -0,0 +1,9 @@ +# CodeInterpreterTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md new file mode 100644 index 00000000..148f7608 --- /dev/null +++ b/docs/models/completionargs.md @@ -0,0 +1,19 @@ +# CompletionArgs + +White-listed arguments from the completion API + + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: 
| N/A | | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/completionargsstop.md b/docs/models/completionargsstop.md new file mode 100644 index 00000000..b93f993e --- /dev/null +++ b/docs/models/completionargsstop.md @@ -0,0 +1,17 @@ +# CompletionArgsStop + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/completionchunk.md b/docs/models/completionchunk.md new file mode 100644 index 00000000..7f8ab5e6 --- /dev/null +++ b/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionevent.md b/docs/models/completionevent.md new file mode 100644 index 00000000..7a66e8fe --- /dev/null +++ b/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | 
------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetunedmodel.md b/docs/models/completionfinetunedmodel.md new file mode 100644 index 00000000..0055db02 --- /dev/null +++ b/docs/models/completionfinetunedmodel.md @@ -0,0 +1,22 @@ +# CompletionFineTunedModel + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjob.md b/docs/models/completionfinetuningjob.md new file mode 100644 
index 00000000..83c0ae7e --- /dev/null +++ b/docs/models/completionfinetuningjob.md @@ -0,0 +1,24 @@ +# CompletionFineTuningJob + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.CompletionFineTuningJobStatus](../models/completionfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. 
| +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | +| `integrations` | List[[models.CompletionFineTuningJobIntegration](../models/completionfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["completion"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | +| `repositories` | List[[models.CompletionFineTuningJobRepository](../models/completionfinetuningjobrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobdetails.md b/docs/models/completionfinetuningjobdetails.md new file mode 100644 index 00000000..3c54e874 --- /dev/null +++ b/docs/models/completionfinetuningjobdetails.md @@ -0,0 +1,26 @@ +# CompletionFineTuningJobDetails + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.CompletionFineTuningJobDetailsStatus](../models/completionfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | 
List[[models.CompletionFineTuningJobDetailsIntegration](../models/completionfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | +| `repositories` | List[[models.CompletionFineTuningJobDetailsRepository](../models/completionfinetuningjobdetailsrepository.md)] | :heavy_minus_sign: | N/A | +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobdetailsintegration.md b/docs/models/completionfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..38f6a349 --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobdetailsrepository.md b/docs/models/completionfinetuningjobdetailsrepository.md new file mode 100644 index 00000000..c6bd67cd --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobdetailsstatus.md 
b/docs/models/completionfinetuningjobdetailsstatus.md new file mode 100644 index 00000000..94d795a9 --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsstatus.md @@ -0,0 +1,17 @@ +# CompletionFineTuningJobDetailsStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobintegration.md b/docs/models/completionfinetuningjobintegration.md new file mode 100644 index 00000000..dbe57417 --- /dev/null +++ b/docs/models/completionfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobrepository.md b/docs/models/completionfinetuningjobrepository.md new file mode 100644 index 00000000..54225e27 --- /dev/null +++ b/docs/models/completionfinetuningjobrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobstatus.md b/docs/models/completionfinetuningjobstatus.md new file mode 100644 index 00000000..db151a1b --- /dev/null +++ b/docs/models/completionfinetuningjobstatus.md @@ -0,0 +1,19 @@ +# CompletionFineTuningJobStatus + +The current status of the fine-tuning job. 
+ + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/completionresponsestreamchoice.md b/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionresponsestreamchoicefinishreason.md b/docs/models/completionresponsestreamchoicefinishreason.md new file mode 100644 index 00000000..0fece473 --- /dev/null +++ b/docs/models/completionresponsestreamchoicefinishreason.md @@ -0,0 +1,11 @@ +# CompletionResponseStreamChoiceFinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` 
| length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/docs/models/completiontrainingparameters.md b/docs/models/completiontrainingparameters.md new file mode 100644 index 00000000..4746a95d --- /dev/null +++ b/docs/models/completiontrainingparameters.md @@ -0,0 +1,14 @@ +# CompletionTrainingParameters + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/confirmation.md b/docs/models/confirmation.md new file mode 100644 index 00000000..fd6e6aaa --- /dev/null +++ b/docs/models/confirmation.md @@ -0,0 +1,9 @@ +# Confirmation + + +## Values + +| Name | Value | +| ------- | ------- | +| `ALLOW` | allow | +| `DENY` | deny | \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md new file mode 100644 index 00000000..cb7e51d3 --- /dev/null +++ b/docs/models/contentchunk.md @@ -0,0 +1,47 @@ +# ContentChunk + + +## Supported Types + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + 
+### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + +### `models.AudioChunk` + +```python +value: models.AudioChunk = /* values here */ +``` + diff --git a/docs/models/conversationappendrequest.md b/docs/models/conversationappendrequest.md new file mode 100644 index 00000000..78a96508 --- /dev/null +++ b/docs/models/conversationappendrequest.md @@ -0,0 +1,13 @@ +# ConversationAppendRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationappendrequesthandoffexecution.md b/docs/models/conversationappendrequesthandoffexecution.md new file mode 100644 index 00000000..7418b36a --- /dev/null +++ b/docs/models/conversationappendrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationAppendRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequest.md b/docs/models/conversationappendstreamrequest.md new file mode 100644 index 00000000..daea9c52 --- /dev/null +++ b/docs/models/conversationappendstreamrequest.md @@ -0,0 +1,13 @@ +# ConversationAppendStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | 
:heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequesthandoffexecution.md b/docs/models/conversationappendstreamrequesthandoffexecution.md new file mode 100644 index 00000000..1bbced3e --- /dev/null +++ b/docs/models/conversationappendstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationAppendStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationevents.md b/docs/models/conversationevents.md new file mode 100644 index 00000000..f1e2c4e9 --- /dev/null +++ b/docs/models/conversationevents.md @@ -0,0 +1,9 @@ +# ConversationEvents + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `event` | [models.SSETypes](../models/ssetypes.md) | :heavy_check_mark: | Server side events sent when streaming a conversation response. 
| +| `data` | [models.ConversationEventsData](../models/conversationeventsdata.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationeventsdata.md b/docs/models/conversationeventsdata.md new file mode 100644 index 00000000..5452d7d5 --- /dev/null +++ b/docs/models/conversationeventsdata.md @@ -0,0 +1,65 @@ +# ConversationEventsData + + +## Supported Types + +### `models.AgentHandoffDoneEvent` + +```python +value: models.AgentHandoffDoneEvent = /* values here */ +``` + +### `models.AgentHandoffStartedEvent` + +```python +value: models.AgentHandoffStartedEvent = /* values here */ +``` + +### `models.ResponseDoneEvent` + +```python +value: models.ResponseDoneEvent = /* values here */ +``` + +### `models.ResponseErrorEvent` + +```python +value: models.ResponseErrorEvent = /* values here */ +``` + +### `models.ResponseStartedEvent` + +```python +value: models.ResponseStartedEvent = /* values here */ +``` + +### `models.FunctionCallEvent` + +```python +value: models.FunctionCallEvent = /* values here */ +``` + +### `models.MessageOutputEvent` + +```python +value: models.MessageOutputEvent = /* values here */ +``` + +### `models.ToolExecutionDeltaEvent` + +```python +value: models.ToolExecutionDeltaEvent = /* values here */ +``` + +### `models.ToolExecutionDoneEvent` + +```python +value: models.ToolExecutionDoneEvent = /* values here */ +``` + +### `models.ToolExecutionStartedEvent` + +```python +value: models.ToolExecutionStartedEvent = /* values here */ +``` + diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md new file mode 100644 index 00000000..daefe336 --- /dev/null +++ b/docs/models/conversationhistory.md @@ -0,0 +1,12 @@ +# ConversationHistory + +Retrieve all entries in a conversation. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `object` | *Optional[Literal["conversation.history"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationinputs.md b/docs/models/conversationinputs.md new file mode 100644 index 00000000..86db40ea --- /dev/null +++ b/docs/models/conversationinputs.md @@ -0,0 +1,17 @@ +# ConversationInputs + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.InputEntries]` + +```python +value: List[models.InputEntries] = /* values here */ +``` + diff --git a/docs/models/conversationmessages.md b/docs/models/conversationmessages.md new file mode 100644 index 00000000..8fa51571 --- /dev/null +++ b/docs/models/conversationmessages.md @@ -0,0 +1,12 @@ +# ConversationMessages + +Similar to the conversation history but only keep the messages + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `object` | *Optional[Literal["conversation.messages"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md new file mode 100644 index 00000000..bd7823a8 --- /dev/null +++ b/docs/models/conversationrequest.md @@ -0,0 
+1,20 @@ +# ConversationRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrequestagentversion.md b/docs/models/conversationrequestagentversion.md new file mode 100644 index 00000000..9f251821 --- /dev/null +++ b/docs/models/conversationrequestagentversion.md @@ -0,0 +1,17 @@ +# ConversationRequestAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrequesthandoffexecution.md b/docs/models/conversationrequesthandoffexecution.md new file mode 100644 index 00000000..e7314f7e --- /dev/null +++ b/docs/models/conversationrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationrequesttool.md b/docs/models/conversationrequesttool.md new file mode 100644 index 00000000..2e4e8d01 --- /dev/null +++ b/docs/models/conversationrequesttool.md @@ -0,0 +1,41 @@ +# ConversationRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### 
`models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md new file mode 100644 index 00000000..2732f785 --- /dev/null +++ b/docs/models/conversationresponse.md @@ -0,0 +1,13 @@ +# ConversationResponse + +The response after appending new entries to the conversation. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `object` | *Optional[Literal["conversation.response"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md new file mode 100644 index 00000000..ad3ff362 --- /dev/null +++ b/docs/models/conversationrestartrequest.md @@ -0,0 +1,17 @@ +# ConversationRestartRequest + +Request to restart a new conversation from a given entry in the conversation. 
+ + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartrequestagentversion.md b/docs/models/conversationrestartrequestagentversion.md new file mode 100644 index 00000000..019ba301 --- /dev/null +++ b/docs/models/conversationrestartrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartRequestAgentVersion + +Specific version of the agent to use when restarting. 
If not provided, uses the current version. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrestartrequesthandoffexecution.md b/docs/models/conversationrestartrequesthandoffexecution.md new file mode 100644 index 00000000..5790624b --- /dev/null +++ b/docs/models/conversationrestartrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRestartRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md new file mode 100644 index 00000000..865a1e8f --- /dev/null +++ b/docs/models/conversationrestartstreamrequest.md @@ -0,0 +1,17 @@ +# ConversationRestartStreamRequest + +Request to restart a new conversation from a given entry in the conversation. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequestagentversion.md b/docs/models/conversationrestartstreamrequestagentversion.md new file mode 100644 index 00000000..9e006300 --- /dev/null +++ b/docs/models/conversationrestartstreamrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartStreamRequestAgentVersion + +Specific version of the agent to use when restarting. If not provided, uses the current version. 
+ + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrestartstreamrequesthandoffexecution.md b/docs/models/conversationrestartstreamrequesthandoffexecution.md new file mode 100644 index 00000000..97266b43 --- /dev/null +++ b/docs/models/conversationrestartstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRestartStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md new file mode 100644 index 00000000..8b74f9e7 --- /dev/null +++ b/docs/models/conversationstreamrequest.md @@ -0,0 +1,20 @@ +# ConversationStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | 
List[[models.ConversationStreamRequestTool](../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequestagentversion.md b/docs/models/conversationstreamrequestagentversion.md new file mode 100644 index 00000000..52ee9672 --- /dev/null +++ b/docs/models/conversationstreamrequestagentversion.md @@ -0,0 +1,17 @@ +# ConversationStreamRequestAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationstreamrequesthandoffexecution.md b/docs/models/conversationstreamrequesthandoffexecution.md new file mode 100644 index 00000000..c98e194c --- /dev/null +++ b/docs/models/conversationstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationstreamrequesttool.md b/docs/models/conversationstreamrequesttool.md new file mode 100644 index 00000000..0f75f82b --- /dev/null +++ b/docs/models/conversationstreamrequesttool.md @@ -0,0 +1,41 @@ +# 
ConversationStreamRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/conversationthinkchunk.md b/docs/models/conversationthinkchunk.md new file mode 100644 index 00000000..1fb16bd9 --- /dev/null +++ b/docs/models/conversationthinkchunk.md @@ -0,0 +1,10 @@ +# ConversationThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `type` | *Optional[Literal["thinking"]]* | :heavy_minus_sign: | N/A | +| `thinking` | List[[models.ConversationThinkChunkThinking](../models/conversationthinkchunkthinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationthinkchunkthinking.md b/docs/models/conversationthinkchunkthinking.md new file mode 100644 index 00000000..84b80018 --- /dev/null +++ b/docs/models/conversationthinkchunkthinking.md @@ -0,0 +1,17 @@ +# ConversationThinkChunkThinking + + +## Supported Types + +### `models.TextChunk` + +```python +value: 
models.TextChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/conversationusageinfo.md b/docs/models/conversationusageinfo.md new file mode 100644 index 00000000..57e26033 --- /dev/null +++ b/docs/models/conversationusageinfo.md @@ -0,0 +1,12 @@ +# ConversationUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `connector_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `connectors` | Dict[str, *int*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/createagentrequest.md b/docs/models/createagentrequest.md new file mode 100644 index 00000000..cca3a079 --- /dev/null +++ b/docs/models/createagentrequest.md @@ -0,0 +1,16 @@ +# CreateAgentRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/createagentrequesttool.md b/docs/models/createagentrequesttool.md new file mode 100644 index 00000000..c6ed3e98 --- /dev/null +++ b/docs/models/createagentrequesttool.md @@ -0,0 +1,41 @@ +# CreateAgentRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/createbatchjobrequest.md b/docs/models/createbatchjobrequest.md new file mode 100644 index 00000000..d094e2d5 --- /dev/null +++ b/docs/models/createbatchjobrequest.md @@ -0,0 +1,14 @@ +# CreateBatchJobRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference; these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `requests` | List[[models.BatchRequest](../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | +| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest
**Example 2:** mistral-medium-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | \ No newline at end of file diff --git a/docs/models/createfileresponse.md b/docs/models/createfileresponse.md new file mode 100644 index 00000000..8152922b --- /dev/null +++ b/docs/models/createfileresponse.md @@ -0,0 +1,18 @@ +# CreateFileResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/createfinetuningjobrequest.md b/docs/models/createfinetuningjobrequest.md new file mode 100644 index 00000000..a93e323d --- /dev/null +++ b/docs/models/createfinetuningjobrequest.md @@ -0,0 +1,18 @@ +# CreateFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | +| `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/createfinetuningjobrequestintegration.md b/docs/models/createfinetuningjobrequestintegration.md new file mode 100644 index 00000000..0054a4a6 --- /dev/null +++ b/docs/models/createfinetuningjobrequestintegration.md @@ -0,0 +1,11 @@ +# CreateFineTuningJobRequestIntegration + + +## Supported Types + +### `models.WandbIntegration` + +```python +value: models.WandbIntegration = /* values here */ +``` + diff --git a/docs/models/createfinetuningjobrequestrepository.md b/docs/models/createfinetuningjobrequestrepository.md new file mode 100644 index 00000000..32be1b6d --- /dev/null +++ b/docs/models/createfinetuningjobrequestrepository.md @@ -0,0 +1,11 @@ +# CreateFineTuningJobRequestRepository + + +## Supported Types + +### `models.GithubRepositoryIn` + +```python +value: models.GithubRepositoryIn = /* values here */ +``` + diff --git a/docs/models/createlibraryrequest.md b/docs/models/createlibraryrequest.md new file mode 100644 index 00000000..71562806 --- /dev/null +++ b/docs/models/createlibraryrequest.md @@ -0,0 +1,10 @@ +# CreateLibraryRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`chunk_size` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/deletefileresponse.md b/docs/models/deletefileresponse.md new file mode 100644 index 00000000..188e2504 --- /dev/null +++ b/docs/models/deletefileresponse.md @@ -0,0 +1,10 @@ +# DeleteFileResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The ID of the deleted file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type that was deleted | file | +| `deleted` | *bool* | :heavy_check_mark: | The deletion status. | false | \ No newline at end of file diff --git a/docs/models/deletemodelout.md b/docs/models/deletemodelout.md new file mode 100644 index 00000000..5fd4df7a --- /dev/null +++ b/docs/models/deletemodelout.md @@ -0,0 +1,10 @@ +# DeleteModelOut + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the deleted model. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `object` | *Optional[str]* | :heavy_minus_sign: | The object type that was deleted | | +| `deleted` | *Optional[bool]* | :heavy_minus_sign: | The deletion status | true | \ No newline at end of file diff --git a/docs/models/deletemodelv1modelsmodeliddeleterequest.md b/docs/models/deletemodelv1modelsmodeliddeleterequest.md new file mode 100644 index 00000000..d9bc15fe --- /dev/null +++ b/docs/models/deletemodelv1modelsmodeliddeleterequest.md @@ -0,0 +1,8 @@ +# DeleteModelV1ModelsModelIDDeleteRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to delete. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/deltamessagecontent.md 
b/docs/models/deltamessagecontent.md new file mode 100644 index 00000000..8142772d --- /dev/null +++ b/docs/models/deltamessagecontent.md @@ -0,0 +1,17 @@ +# DeltaMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/document.md b/docs/models/document.md new file mode 100644 index 00000000..42c639a6 --- /dev/null +++ b/docs/models/document.md @@ -0,0 +1,27 @@ +# Document + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `size` | *Nullable[int]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `process_status` | [models.ProcessStatus](../models/processstatus.md) | :heavy_check_mark: | N/A | +| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | +| 
`tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `processing_status` | *str* | :heavy_check_mark: | N/A | +| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md new file mode 100644 index 00000000..95c3fa52 --- /dev/null +++ b/docs/models/documentlibrarytool.md @@ -0,0 +1,10 @@ +# DocumentLibraryTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["document_library"]* | :heavy_check_mark: | N/A | +| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. 
| \ No newline at end of file diff --git a/docs/models/documenttextcontent.md b/docs/models/documenttextcontent.md new file mode 100644 index 00000000..989f49e9 --- /dev/null +++ b/docs/models/documenttextcontent.md @@ -0,0 +1,8 @@ +# DocumentTextContent + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentunion.md b/docs/models/documentunion.md new file mode 100644 index 00000000..e573bd46 --- /dev/null +++ b/docs/models/documentunion.md @@ -0,0 +1,25 @@ +# DocumentUnion + +Document to run OCR on + + +## Supported Types + +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/docs/models/documentupload.md b/docs/models/documentupload.md new file mode 100644 index 00000000..4e58a475 --- /dev/null +++ b/docs/models/documentupload.md @@ -0,0 +1,8 @@ +# DocumentUpload + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name, you should format your request as follows:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md new file mode 100644 index 00000000..9dbfbe50 --- /dev/null +++ b/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/docs/models/embeddingdtype.md b/docs/models/embeddingdtype.md new file mode 100644 index 00000000..01656b0a --- /dev/null +++ b/docs/models/embeddingdtype.md @@ -0,0 +1,12 @@ +# EmbeddingDtype + + +## Values + +| Name | Value | +| --------- | --------- | +| `FLOAT` | float | +| `INT8` | int8 | +| `UINT8` | uint8 | +| `BINARY` | binary | +| `UBINARY` | ubinary | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md new file mode 100644 index 00000000..71d139cd --- /dev/null +++ b/docs/models/embeddingrequest.md @@ -0,0 +1,13 @@ +# EmbeddingRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md new file mode 100644 index 00000000..a3f82c1c --- /dev/null +++ b/docs/models/embeddingrequestinputs.md @@ -0,0 +1,19 @@ +# EmbeddingRequestInputs + +Text to embed. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/embeddingresponse.md b/docs/models/embeddingresponse.md new file mode 100644 index 00000000..2bd85b4d --- /dev/null +++ b/docs/models/embeddingresponse.md @@ -0,0 +1,12 @@ +# EmbeddingResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `data` | List[[models.EmbeddingResponseData](../models/embeddingresponsedata.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file 
diff --git a/docs/models/embeddingresponsedata.md b/docs/models/embeddingresponsedata.md new file mode 100644 index 00000000..20b50618 --- /dev/null +++ b/docs/models/embeddingresponsedata.md @@ -0,0 +1,10 @@ +# EmbeddingResponseData + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | embedding | +| `embedding` | List[*float*] | :heavy_minus_sign: | N/A | [
0.1,
0.2,
0.3
] | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | 0 | \ No newline at end of file diff --git a/docs/models/encodingformat.md b/docs/models/encodingformat.md new file mode 100644 index 00000000..7d5941cf --- /dev/null +++ b/docs/models/encodingformat.md @@ -0,0 +1,9 @@ +# EncodingFormat + + +## Values + +| Name | Value | +| -------- | -------- | +| `FLOAT` | float | +| `BASE64` | base64 | \ No newline at end of file diff --git a/docs/models/entitytype.md b/docs/models/entitytype.md new file mode 100644 index 00000000..7c040b38 --- /dev/null +++ b/docs/models/entitytype.md @@ -0,0 +1,12 @@ +# EntityType + +The type of entity, used to share a library. + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `USER` | User | +| `WORKSPACE` | Workspace | +| `ORG` | Org | \ No newline at end of file diff --git a/docs/models/entry.md b/docs/models/entry.md new file mode 100644 index 00000000..d934b677 --- /dev/null +++ b/docs/models/entry.md @@ -0,0 +1,41 @@ +# Entry + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.FunctionResultEntry` + +```python +value: models.FunctionResultEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/event.md b/docs/models/event.md new file mode 100644 index 00000000..3eebffca --- /dev/null +++ b/docs/models/event.md @@ -0,0 +1,10 @@ +# Event + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------- | --------------------------------------------- | 
--------------------------------------------- | --------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | The name of the event. | +| `data` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | \ No newline at end of file diff --git a/docs/models/file.md b/docs/models/file.md new file mode 100644 index 00000000..37cc418f --- /dev/null +++ b/docs/models/file.md @@ -0,0 +1,10 @@ +# File + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `file_name` | *str* | :heavy_check_mark: | N/A | +| `content` | *Union[bytes, IO[bytes], io.BufferedReader]* | :heavy_check_mark: | N/A | +| `content_type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filechunk.md b/docs/models/filechunk.md new file mode 100644 index 00000000..18217114 --- /dev/null +++ b/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["file"]]* | :heavy_minus_sign: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filepurpose.md b/docs/models/filepurpose.md new file mode 100644 index 00000000..14cab13e --- /dev/null +++ b/docs/models/filepurpose.md @@ -0,0 +1,10 @@ +# FilePurpose + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `FINE_TUNE` | fine-tune | +| `BATCH` | batch | +| `OCR` | ocr | \ No newline at end of file diff --git a/docs/models/filesapiroutesdeletefilerequest.md b/docs/models/filesapiroutesdeletefilerequest.md new file mode 100644 index 
00000000..1b02c2db --- /dev/null +++ b/docs/models/filesapiroutesdeletefilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesDeleteFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesdownloadfilerequest.md b/docs/models/filesapiroutesdownloadfilerequest.md new file mode 100644 index 00000000..8b28cb0e --- /dev/null +++ b/docs/models/filesapiroutesdownloadfilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesDownloadFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesgetsignedurlrequest.md b/docs/models/filesapiroutesgetsignedurlrequest.md new file mode 100644 index 00000000..dbe3c801 --- /dev/null +++ b/docs/models/filesapiroutesgetsignedurlrequest.md @@ -0,0 +1,9 @@ +# FilesAPIRoutesGetSignedURLRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------- | --------------------------------------------------------------- | --------------------------------------------------------------- | --------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `expiry` | *Optional[int]* | :heavy_minus_sign: | Number of hours before the url becomes invalid. 
Defaults to 24h | \ No newline at end of file diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md new file mode 100644 index 00000000..57d11722 --- /dev/null +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -0,0 +1,15 @@ +# FilesAPIRoutesListFilesRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | +| `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesretrievefilerequest.md b/docs/models/filesapiroutesretrievefilerequest.md new file mode 100644 index 00000000..961bae1f --- /dev/null +++ b/docs/models/filesapiroutesretrievefilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesRetrieveFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md new file mode 100644 index 00000000..4f3e72db --- /dev/null +++ b/docs/models/fileschema.md @@ -0,0 +1,18 @@ +# FileSchema + + +## Fields + +| 
Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md new file mode 100644 index 00000000..fde0b625 --- /dev/null +++ b/docs/models/fimcompletionrequest.md @@ -0,0 +1,18 @@ +# FIMCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: 
| ID of the model with FIM to use. | codestral-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. 
| def | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/fimcompletionrequeststop.md b/docs/models/fimcompletionrequeststop.md new file mode 100644 index 00000000..a0dbb00a --- /dev/null +++ b/docs/models/fimcompletionrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/fimcompletionresponse.md b/docs/models/fimcompletionresponse.md new file mode 100644 index 00000000..cd62d034 --- /dev/null +++ b/docs/models/fimcompletionresponse.md @@ -0,0 +1,13 @@ +# FIMCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *int* | 
:heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md new file mode 100644 index 00000000..ba62d854 --- /dev/null +++ b/docs/models/fimcompletionstreamrequest.md @@ -0,0 +1,18 @@ +# FIMCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
| | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequeststop.md b/docs/models/fimcompletionstreamrequeststop.md new file mode 100644 index 00000000..5a9e2ff0 --- /dev/null +++ b/docs/models/fimcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/finetuneablemodeltype.md b/docs/models/finetuneablemodeltype.md new file mode 100644 index 00000000..34b24bd4 --- /dev/null +++ b/docs/models/finetuneablemodeltype.md @@ -0,0 +1,9 @@ +# FineTuneableModelType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/finetunedmodelcapabilities.md b/docs/models/finetunedmodelcapabilities.md new file mode 100644 index 00000000..d3203a2a --- /dev/null +++ b/docs/models/finetunedmodelcapabilities.md @@ -0,0 +1,12 @@ +# FineTunedModelCapabilities + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/format_.md b/docs/models/format_.md new file mode 100644 index 00000000..97d286a4 --- /dev/null +++ b/docs/models/format_.md @@ -0,0 +1,11 @@ +# Format + +Format of the table + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/ftclassifierlossfunction.md b/docs/models/ftclassifierlossfunction.md new file mode 100644 index 00000000..919cdd38 --- /dev/null +++ b/docs/models/ftclassifierlossfunction.md @@ -0,0 +1,9 @@ +# FTClassifierLossFunction + + +## Values + +| Name | 
Value | +| -------------- | -------------- | +| `SINGLE_CLASS` | single_class | +| `MULTI_CLASS` | multi_class | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md new file mode 100644 index 00000000..409f0526 --- /dev/null +++ b/docs/models/ftmodelcard.md @@ -0,0 +1,25 @@ +# FTModelCard + +Extra fields for fine-tuned models. + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `type` | *Literal["fine-tuned"]* | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/function.md b/docs/models/function.md new file mode 100644 index 
00000000..b2bdb3fe --- /dev/null +++ b/docs/models/function.md @@ -0,0 +1,11 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functioncall.md b/docs/models/functioncall.md new file mode 100644 index 00000000..7ccd90dc --- /dev/null +++ b/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallentry.md b/docs/models/functioncallentry.md new file mode 100644 index 00000000..2843db9d --- /dev/null +++ b/docs/models/functioncallentry.md @@ -0,0 +1,18 @@ +# FunctionCallEntry + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["function.call"]]* | :heavy_minus_sign: | N/A | +| 
`created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | +| `confirmation_status` | [OptionalNullable[models.FunctionCallEntryConfirmationStatus]](../models/functioncallentryconfirmationstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallentryarguments.md b/docs/models/functioncallentryarguments.md new file mode 100644 index 00000000..f1f6e39e --- /dev/null +++ b/docs/models/functioncallentryarguments.md @@ -0,0 +1,17 @@ +# FunctionCallEntryArguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/functioncallentryconfirmationstatus.md b/docs/models/functioncallentryconfirmationstatus.md new file mode 100644 index 00000000..8948beb6 --- /dev/null +++ b/docs/models/functioncallentryconfirmationstatus.md @@ -0,0 +1,10 @@ +# FunctionCallEntryConfirmationStatus + + +## Values + +| Name | Value | +| --------- | --------- | +| `PENDING` | pending | +| `ALLOWED` | allowed | +| `DENIED` | denied | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md new file mode 100644 index 00000000..0e3a36d6 --- /dev/null +++ b/docs/models/functioncallevent.md @@ -0,0 
+1,17 @@ +# FunctionCallEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `confirmation_status` | [OptionalNullable[models.FunctionCallEventConfirmationStatus]](../models/functioncalleventconfirmationstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncalleventconfirmationstatus.md b/docs/models/functioncalleventconfirmationstatus.md new file mode 100644 index 00000000..4a3c8774 --- /dev/null +++ b/docs/models/functioncalleventconfirmationstatus.md @@ -0,0 +1,10 @@ +# FunctionCallEventConfirmationStatus + + +## Values + +| Name | Value | +| --------- | --------- | +| `PENDING` | pending | +| `ALLOWED` | allowed | +| `DENIED` | denied | \ No newline at end of file diff --git a/docs/models/functionname.md b/docs/models/functionname.md new file mode 100644 index 00000000..87d7b485 --- /dev/null +++ b/docs/models/functionname.md 
@@ -0,0 +1,10 @@ +# FunctionName + +this restriction of `Function` is used to select a specific function to call + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functionresultentry.md b/docs/models/functionresultentry.md new file mode 100644 index 00000000..6a77abfd --- /dev/null +++ b/docs/models/functionresultentry.md @@ -0,0 +1,14 @@ +# FunctionResultEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["function.result"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `result` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functiontool.md b/docs/models/functiontool.md new file mode 100644 index 00000000..0226b704 --- /dev/null +++ b/docs/models/functiontool.md @@ -0,0 +1,9 @@ +# FunctionTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | 
+| `type` | *Literal["function"]* | :heavy_check_mark: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getfileresponse.md b/docs/models/getfileresponse.md new file mode 100644 index 00000000..0edd13e0 --- /dev/null +++ b/docs/models/getfileresponse.md @@ -0,0 +1,19 @@ +# GetFileResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/getsignedurlresponse.md b/docs/models/getsignedurlresponse.md new file mode 100644 index 00000000..bde69323 --- /dev/null +++ b/docs/models/getsignedurlresponse.md @@ -0,0 +1,8 @@ +# GetSignedURLResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `url` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepository.md b/docs/models/githubrepository.md new file mode 100644 index 00000000..827b6f34 --- /dev/null +++ b/docs/models/githubrepository.md @@ -0,0 +1,13 @@ +# GithubRepository + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md new file mode 100644 index 00000000..241cf584 --- /dev/null +++ b/docs/models/githubrepositoryin.md @@ -0,0 +1,13 @@ +# 
GithubRepositoryIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/hyperparameters.md b/docs/models/hyperparameters.md new file mode 100644 index 00000000..b6c00c36 --- /dev/null +++ b/docs/models/hyperparameters.md @@ -0,0 +1,17 @@ +# Hyperparameters + + +## Supported Types + +### `models.CompletionTrainingParameters` + +```python +value: models.CompletionTrainingParameters = /* values here */ +``` + +### `models.ClassifierTrainingParameters` + +```python +value: models.ClassifierTrainingParameters = /* values here */ +``` + diff --git a/docs/models/imagedetail.md b/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md new file mode 100644 index 00000000..b476b6f2 --- /dev/null +++ b/docs/models/imagegenerationtool.md @@ -0,0 +1,9 @@ +# ImageGenerationTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | 
[OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["image_generation"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imageurl.md b/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunk.md b/docs/models/imageurlchunk.md new file mode 100644 index 00000000..db0c53d2 --- /dev/null +++ b/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlunion.md b/docs/models/imageurlunion.md new file mode 100644 index 00000000..db97130f --- /dev/null +++ b/docs/models/imageurlunion.md @@ -0,0 +1,17 @@ +# ImageURLUnion + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str 
= /* values here */ +``` + diff --git a/docs/models/inputentries.md b/docs/models/inputentries.md new file mode 100644 index 00000000..b44a467d --- /dev/null +++ b/docs/models/inputentries.md @@ -0,0 +1,41 @@ +# InputEntries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.FunctionResultEntry` + +```python +value: models.FunctionResultEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/inputs.md b/docs/models/inputs.md new file mode 100644 index 00000000..d5771207 --- /dev/null +++ b/docs/models/inputs.md @@ -0,0 +1,19 @@ +# Inputs + +Chat to classify + + +## Supported Types + +### `models.InstructRequest` + +```python +value: models.InstructRequest = /* values here */ +``` + +### `List[models.InstructRequest]` + +```python +value: List[models.InstructRequest] = /* values here */ +``` + diff --git a/docs/models/instructrequest.md b/docs/models/instructrequest.md new file mode 100644 index 00000000..5f0cdfff --- /dev/null +++ b/docs/models/instructrequest.md @@ -0,0 +1,8 @@ +# InstructRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestMessage](../models/instructrequestmessage.md)] | 
:heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestmessage.md b/docs/models/instructrequestmessage.md new file mode 100644 index 00000000..57ed27ab --- /dev/null +++ b/docs/models/instructrequestmessage.md @@ -0,0 +1,29 @@ +# InstructRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/jobmetadata.md b/docs/models/jobmetadata.md new file mode 100644 index 00000000..5d8a89dd --- /dev/null +++ b/docs/models/jobmetadata.md @@ -0,0 +1,14 @@ +# JobMetadata + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md new file mode 100644 index 00000000..c19d0241 --- /dev/null +++ b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesBatchCancelBatchJobRequest + + +## Fields + +| Field | Type | Required | 
Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md new file mode 100644 index 00000000..8c259bea --- /dev/null +++ b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md @@ -0,0 +1,9 @@ +# JobsAPIRoutesBatchGetBatchJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------ | ------------------------ | ------------------------ | ------------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `inline` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md new file mode 100644 index 00000000..5ceb0b2c --- /dev/null +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -0,0 +1,16 @@ +# JobsAPIRoutesBatchGetBatchJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | 
List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../models/orderby.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md new file mode 100644 index 00000000..f9700df5 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningArchiveFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to archive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md new file mode 100644 index 00000000..883cbac6 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningCancelFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. 
| \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md new file mode 100644 index 00000000..fb62eb62 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCancelFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTuningJobDetails` + +```python +value: models.ClassifierFineTuningJobDetails = /* values here */ +``` + +### `models.CompletionFineTuningJobDetails` + +```python +value: models.CompletionFineTuningJobDetails = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md new file mode 100644 index 00000000..7b52e2ca --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCreateFineTuningJobResponse + +OK + + +## Supported Types + +### `models.Response` + +```python +value: models.Response = /* values here */ +``` + +### `models.LegacyJobMetadata` + +```python +value: models.LegacyJobMetadata = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md new file mode 100644 index 00000000..fde19800 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. 
| \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md new file mode 100644 index 00000000..f7705327 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTuningJobDetails` + +```python +value: models.ClassifierFineTuningJobDetails = /* values here */ +``` + +### `models.CompletionFineTuningJobDetails` + +```python +value: models.CompletionFineTuningJobDetails = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md new file mode 100644 index 00000000..23c52c34 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -0,0 +1,17 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. 
| +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md new file mode 100644 index 00000000..40d57686 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobsStatus + +The current job state to filter on. When set, the other results are not displayed. 
+ + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md new file mode 100644 index 00000000..4429fe48 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningStartFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md new file mode 100644 index 00000000..1a7e71d4 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningStartFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTuningJobDetails` + +```python +value: models.ClassifierFineTuningJobDetails = /* values here */ +``` + +### `models.CompletionFineTuningJobDetails` + +```python +value: models.CompletionFineTuningJobDetails = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md new file mode 100644 index 00000000..95c1734d --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest + + 
+## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to unarchive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md new file mode 100644 index 00000000..dbe49a86 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md @@ -0,0 +1,9 @@ +# JobsAPIRoutesFineTuningUpdateFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `update_model_request` | [models.UpdateModelRequest](../models/updatemodelrequest.md) | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md new file mode 100644 index 00000000..f40350bf --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningUpdateFineTunedModelResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTunedModel` + +```python +value: models.ClassifierFineTunedModel = /* values here */ +``` + +### `models.CompletionFineTunedModel` + +```python +value: models.CompletionFineTunedModel = /* values here */ +``` + diff --git a/docs/models/jsonschema.md b/docs/models/jsonschema.md new file mode 100644 index 00000000..7ff7c070 --- /dev/null +++ b/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/legacyjobmetadata.md b/docs/models/legacyjobmetadata.md new file mode 100644 index 00000000..4705ab4f --- /dev/null +++ b/docs/models/legacyjobmetadata.md @@ -0,0 +1,19 @@ +# LegacyJobMetadata + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | The approximated time (in seconds) for the fine-tuning process to complete. | 220 | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | The cost of the fine-tuning job. | 10 | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | The currency used for the fine-tuning job cost. | EUR | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of tokens consumed by one training step. | 131072 | +| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens used during the fine-tuning process. | 1310720 | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens in the training dataset. 
| 305375 | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `deprecated` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `details` | *str* | :heavy_check_mark: | N/A | | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | 10 | +| `object` | *Optional[Literal["job.metadata"]]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/librariesdeletev1request.md b/docs/models/librariesdeletev1request.md new file mode 100644 index 00000000..68d7e543 --- /dev/null +++ b/docs/models/librariesdeletev1request.md @@ -0,0 +1,8 @@ +# LibrariesDeleteV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsdeletev1request.md b/docs/models/librariesdocumentsdeletev1request.md new file mode 100644 index 00000000..efccdb1b --- /dev/null +++ b/docs/models/librariesdocumentsdeletev1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsDeleteV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md new file mode 100644 index 00000000..14ca66f7 --- 
/dev/null +++ b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetExtractedTextSignedURLV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetsignedurlv1request.md b/docs/models/librariesdocumentsgetsignedurlv1request.md new file mode 100644 index 00000000..7c08c180 --- /dev/null +++ b/docs/models/librariesdocumentsgetsignedurlv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetSignedURLV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetstatusv1request.md b/docs/models/librariesdocumentsgetstatusv1request.md new file mode 100644 index 00000000..e6d41875 --- /dev/null +++ b/docs/models/librariesdocumentsgetstatusv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetStatusV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgettextcontentv1request.md b/docs/models/librariesdocumentsgettextcontentv1request.md new file mode 100644 index 00000000..2f58a446 --- /dev/null +++ b/docs/models/librariesdocumentsgettextcontentv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetTextContentV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | 
------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetv1request.md b/docs/models/librariesdocumentsgetv1request.md new file mode 100644 index 00000000..6febc058 --- /dev/null +++ b/docs/models/librariesdocumentsgetv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentslistv1request.md b/docs/models/librariesdocumentslistv1request.md new file mode 100644 index 00000000..44f63001 --- /dev/null +++ b/docs/models/librariesdocumentslistv1request.md @@ -0,0 +1,14 @@ +# LibrariesDocumentsListV1Request + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsreprocessv1request.md b/docs/models/librariesdocumentsreprocessv1request.md new file mode 100644 index 00000000..196ba17b --- /dev/null +++ b/docs/models/librariesdocumentsreprocessv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsReprocessV1Request + + +## Fields + +| Field | Type | Required | 
Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsupdatev1request.md b/docs/models/librariesdocumentsupdatev1request.md new file mode 100644 index 00000000..d4630850 --- /dev/null +++ b/docs/models/librariesdocumentsupdatev1request.md @@ -0,0 +1,10 @@ +# LibrariesDocumentsUpdateV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `update_document_request` | [models.UpdateDocumentRequest](../models/updatedocumentrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1request.md b/docs/models/librariesdocumentsuploadv1request.md new file mode 100644 index 00000000..172a6183 --- /dev/null +++ b/docs/models/librariesdocumentsuploadv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsUploadV1Request + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `request_body` | [models.DocumentUpload](../models/documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesgetv1request.md b/docs/models/librariesgetv1request.md new file mode 100644 index 
00000000..6e1e04c3 --- /dev/null +++ b/docs/models/librariesgetv1request.md @@ -0,0 +1,8 @@ +# LibrariesGetV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariessharecreatev1request.md b/docs/models/librariessharecreatev1request.md new file mode 100644 index 00000000..4c05241d --- /dev/null +++ b/docs/models/librariessharecreatev1request.md @@ -0,0 +1,9 @@ +# LibrariesShareCreateV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `sharing_in` | [models.SharingIn](../models/sharingin.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariessharedeletev1request.md b/docs/models/librariessharedeletev1request.md new file mode 100644 index 00000000..850e22ab --- /dev/null +++ b/docs/models/librariessharedeletev1request.md @@ -0,0 +1,9 @@ +# LibrariesShareDeleteV1Request + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `sharing_delete` | [models.SharingDelete](../models/sharingdelete.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariessharelistv1request.md b/docs/models/librariessharelistv1request.md new file mode 100644 index 00000000..98bf6d17 --- /dev/null +++ b/docs/models/librariessharelistv1request.md @@ -0,0 +1,8 @@ +# 
LibrariesShareListV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesupdatev1request.md b/docs/models/librariesupdatev1request.md new file mode 100644 index 00000000..c5c142db --- /dev/null +++ b/docs/models/librariesupdatev1request.md @@ -0,0 +1,9 @@ +# LibrariesUpdateV1Request + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `update_library_request` | [models.UpdateLibraryRequest](../models/updatelibraryrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/library.md b/docs/models/library.md new file mode 100644 index 00000000..4319f43d --- /dev/null +++ b/docs/models/library.md @@ -0,0 +1,23 @@ +# Library + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `owner_id` | 
*Nullable[str]* | :heavy_check_mark: | N/A | +| `owner_type` | *str* | :heavy_check_mark: | N/A | +| `total_size` | *int* | :heavy_check_mark: | N/A | +| `nb_documents` | *int* | :heavy_check_mark: | N/A | +| `chunk_size` | *Nullable[int]* | :heavy_check_mark: | N/A | +| `emoji` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `explicit_user_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `explicit_workspace_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Generated Name | \ No newline at end of file diff --git a/docs/models/listbatchjobsresponse.md b/docs/models/listbatchjobsresponse.md new file mode 100644 index 00000000..c23e3220 --- /dev/null +++ b/docs/models/listbatchjobsresponse.md @@ -0,0 +1,10 @@ +# ListBatchJobsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `data` | List[[models.BatchJob](../models/batchjob.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listdocumentsresponse.md b/docs/models/listdocumentsresponse.md new file mode 100644 index 00000000..47b9d3b7 --- /dev/null +++ b/docs/models/listdocumentsresponse.md @@ -0,0 +1,9 @@ +# ListDocumentsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | 
---------------------------------------------------- | ---------------------------------------------------- | +| `pagination` | [models.PaginationInfo](../models/paginationinfo.md) | :heavy_check_mark: | N/A | +| `data` | List[[models.Document](../models/document.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listfilesresponse.md b/docs/models/listfilesresponse.md new file mode 100644 index 00000000..802f685f --- /dev/null +++ b/docs/models/listfilesresponse.md @@ -0,0 +1,10 @@ +# ListFilesResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | +| `object` | *str* | :heavy_check_mark: | N/A | +| `total` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listfinetuningjobsresponse.md b/docs/models/listfinetuningjobsresponse.md new file mode 100644 index 00000000..00251242 --- /dev/null +++ b/docs/models/listfinetuningjobsresponse.md @@ -0,0 +1,10 @@ +# ListFineTuningJobsResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `data` | List[[models.ListFineTuningJobsResponseData](../models/listfinetuningjobsresponsedata.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No 
newline at end of file diff --git a/docs/models/listfinetuningjobsresponsedata.md b/docs/models/listfinetuningjobsresponsedata.md new file mode 100644 index 00000000..adb06444 --- /dev/null +++ b/docs/models/listfinetuningjobsresponsedata.md @@ -0,0 +1,17 @@ +# ListFineTuningJobsResponseData + + +## Supported Types + +### `models.ClassifierFineTuningJob` + +```python +value: models.ClassifierFineTuningJob = /* values here */ +``` + +### `models.CompletionFineTuningJob` + +```python +value: models.CompletionFineTuningJob = /* values here */ +``` + diff --git a/docs/models/listlibrariesresponse.md b/docs/models/listlibrariesresponse.md new file mode 100644 index 00000000..e21b9ced --- /dev/null +++ b/docs/models/listlibrariesresponse.md @@ -0,0 +1,8 @@ +# ListLibrariesResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `data` | List[[models.Library](../models/library.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listsharingout.md b/docs/models/listsharingout.md new file mode 100644 index 00000000..bcac4834 --- /dev/null +++ b/docs/models/listsharingout.md @@ -0,0 +1,8 @@ +# ListSharingOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `data` | List[[models.SharingOut](../models/sharingout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/loc.md b/docs/models/loc.md new file mode 100644 index 00000000..d6094ac2 --- /dev/null +++ b/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` 
+ +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/messageentries.md b/docs/models/messageentries.md new file mode 100644 index 00000000..76256fb9 --- /dev/null +++ b/docs/models/messageentries.md @@ -0,0 +1,17 @@ +# MessageEntries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md new file mode 100644 index 00000000..05617850 --- /dev/null +++ b/docs/models/messageinputcontentchunks.md @@ -0,0 +1,35 @@ +# MessageInputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ConversationThinkChunk` + +```python +value: models.ConversationThinkChunk = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md new file mode 100644 index 00000000..f8514fb3 --- /dev/null +++ b/docs/models/messageinputentry.md @@ -0,0 +1,17 @@ +# MessageInputEntry + +Representation of an input message inside the conversation. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["message.input"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.Role](../models/role.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrycontent.md b/docs/models/messageinputentrycontent.md new file mode 100644 index 00000000..65e55d97 --- /dev/null +++ b/docs/models/messageinputentrycontent.md @@ -0,0 +1,17 @@ +# MessageInputEntryContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.MessageInputContentChunks]` + +```python +value: List[models.MessageInputContentChunks] = /* values here */ +``` + diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md new file mode 100644 index 00000000..c4a7777e --- /dev/null +++ b/docs/models/messageoutputcontentchunks.md @@ -0,0 +1,41 @@ +# MessageOutputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ 
+``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ConversationThinkChunk` + +```python +value: models.ConversationThinkChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/messageoutputentry.md b/docs/models/messageoutputentry.md new file mode 100644 index 00000000..73a1c666 --- /dev/null +++ b/docs/models/messageoutputentry.md @@ -0,0 +1,16 @@ +# MessageOutputEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["message.output"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of 
file diff --git a/docs/models/messageoutputentrycontent.md b/docs/models/messageoutputentrycontent.md new file mode 100644 index 00000000..5206e4eb --- /dev/null +++ b/docs/models/messageoutputentrycontent.md @@ -0,0 +1,17 @@ +# MessageOutputEntryContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.MessageOutputContentChunks]` + +```python +value: List[models.MessageOutputContentChunks] = /* values here */ +``` + diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md new file mode 100644 index 00000000..e09a965f --- /dev/null +++ b/docs/models/messageoutputevent.md @@ -0,0 +1,16 @@ +# MessageOutputEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `type` | *Literal["message.output.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputeventcontent.md b/docs/models/messageoutputeventcontent.md new file mode 100644 index 00000000..16d8d52f --- /dev/null +++ 
b/docs/models/messageoutputeventcontent.md @@ -0,0 +1,17 @@ +# MessageOutputEventContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `models.OutputContentChunks` + +```python +value: models.OutputContentChunks = /* values here */ +``` + diff --git a/docs/models/metric.md b/docs/models/metric.md new file mode 100644 index 00000000..7f863036 --- /dev/null +++ b/docs/models/metric.md @@ -0,0 +1,12 @@ +# Metric + +Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `train_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_mean_token_accuracy` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/mistralpromptmode.md b/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..c3409d03 --- /dev/null +++ b/docs/models/mistralpromptmode.md @@ -0,0 +1,12 @@ +# MistralPromptMode + +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+ + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md new file mode 100644 index 00000000..c7dd2710 --- /dev/null +++ b/docs/models/modelcapabilities.md @@ -0,0 +1,17 @@ +# ModelCapabilities + + +## Fields + +| Field | Type | Required | Description | +| --------------------- | --------------------- | --------------------- | --------------------- | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio_transcription` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md new file mode 100644 index 00000000..af2e5c61 --- /dev/null +++ b/docs/models/modelconversation.md @@ -0,0 +1,18 @@ +# ModelConversation + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. 
| +| `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversationtool.md b/docs/models/modelconversationtool.md new file mode 100644 index 00000000..87235567 --- /dev/null +++ b/docs/models/modelconversationtool.md @@ -0,0 +1,41 @@ +# ModelConversationTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python
+value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/modellist.md b/docs/models/modellist.md new file mode 100644 index 00000000..85b20be7 --- /dev/null +++ b/docs/models/modellist.md @@ -0,0 +1,9 @@ +# ModelList + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `data` | List[[models.ModelListData](../models/modellistdata.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modellistdata.md b/docs/models/modellistdata.md new file mode 100644 index 00000000..b44e84a0 --- /dev/null +++ b/docs/models/modellistdata.md @@ -0,0 +1,17 @@ +# ModelListData + + +## Supported Types + +### `models.BaseModelCard` + +```python +value: models.BaseModelCard = /* values here */ +``` + +### `models.FTModelCard` + +```python +value: models.FTModelCard = /* values here */ +``` + diff --git a/docs/models/moderationobject.md b/docs/models/moderationobject.md new file mode 100644 index 00000000..320b2ab4 --- /dev/null +++ b/docs/models/moderationobject.md @@ -0,0 +1,9 @@ +# ModerationObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `categories` | Dict[str, *bool*] | :heavy_minus_sign: | Moderation result thresholds | +| `category_scores` | Dict[str, *float*] | :heavy_minus_sign: | Moderation result | \ No newline at end of file diff --git a/docs/models/moderationresponse.md b/docs/models/moderationresponse.md new file mode 100644 index 00000000..75a5eec7 --- /dev/null +++ b/docs/models/moderationresponse.md @@ -0,0 +1,10 @@ +# ModerationResponse + + +## Fields + +| 
Field | Type | Required | Description | Example | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *str* | :heavy_check_mark: | N/A | | +| `results` | List[[models.ModerationObject](../models/moderationobject.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/multipartbodyparams.md b/docs/models/multipartbodyparams.md new file mode 100644 index 00000000..f14b9573 --- /dev/null +++ b/docs/models/multipartbodyparams.md @@ -0,0 +1,9 @@ +# MultiPartBodyParams + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `purpose` | [Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/ocrimageobject.md b/docs/models/ocrimageobject.md new file mode 100644 index 00000000..3c0d5544 --- /dev/null +++ b/docs/models/ocrimageobject.md @@ -0,0 +1,14 @@ +# OCRImageObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Image ID for extracted image in a page | +| `top_left_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of top-left corner of the extracted image | +| `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | +| `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | +| `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | +| `image_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Annotation of the extracted image in json str | \ No newline at end of file diff --git a/docs/models/ocrpagedimensions.md b/docs/models/ocrpagedimensions.md new file mode 100644 index 00000000..c93ca64d --- /dev/null +++ b/docs/models/ocrpagedimensions.md @@ -0,0 +1,10 @@ +# OCRPageDimensions + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `dpi` | *int* | :heavy_check_mark: | Dots per inch of the page-image | +| `height` | *int* | :heavy_check_mark: | Height of the image in pixels | +| `width` | *int* | :heavy_check_mark: | Width of the image in pixels | \ No 
newline at end of file diff --git a/docs/models/ocrpageobject.md b/docs/models/ocrpageobject.md new file mode 100644 index 00000000..02473d44 --- /dev/null +++ b/docs/models/ocrpageobject.md @@ -0,0 +1,15 @@ +# OCRPageObject + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | +| `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | +| `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `tables` | List[[models.OCRTableObject](../models/ocrtableobject.md)] | :heavy_minus_sign: | List of all extracted tables in the page | +| `hyperlinks` | List[*str*] | :heavy_minus_sign: | List of all hyperlinks in the page | +| `header` | *OptionalNullable[str]* | :heavy_minus_sign: | Header of the page | +| `footer` | *OptionalNullable[str]* | :heavy_minus_sign: | Footer of the page | +| `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md new file mode 100644 index 00000000..dd3fc2ea --- /dev/null +++ b/docs/models/ocrrequest.md @@ -0,0 +1,20 @@ +# OCRRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.DocumentUnion](../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/ocrresponse.md b/docs/models/ocrresponse.md new file mode 100644 index 00000000..0a309317 --- /dev/null +++ b/docs/models/ocrresponse.md @@ -0,0 +1,11 @@ +# OCRResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| +| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/ocrtableobject.md b/docs/models/ocrtableobject.md new file mode 100644 index 00000000..4e27697c --- /dev/null +++ b/docs/models/ocrtableobject.md @@ -0,0 +1,10 @@ +# OCRTableObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Table ID for extracted table in a page | +| `content` | *str* | :heavy_check_mark: | Content of the table in the given format | +| `format_` | [models.Format](../models/format_.md) | :heavy_check_mark: | Format of the table | \ No newline at end of file diff --git a/docs/models/ocrusageinfo.md b/docs/models/ocrusageinfo.md new file mode 100644 index 00000000..d9d79125 --- /dev/null +++ b/docs/models/ocrusageinfo.md @@ -0,0 +1,9 @@ +# OCRUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `pages_processed` | *int* | :heavy_check_mark: | Number of pages processed | +| `doc_size_bytes` | *OptionalNullable[int]* | :heavy_minus_sign: | Document size in bytes | \ No newline at end of file diff --git a/docs/models/orderby.md b/docs/models/orderby.md new file mode 100644 index 00000000..bba50df1 --- /dev/null +++ b/docs/models/orderby.md @@ -0,0 +1,9 @@ +# OrderBy + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `CREATED` | created | +| `MINUS_CREATED` | -created | \ No newline at end of file diff --git a/docs/models/output.md b/docs/models/output.md new file mode 100644 index 
00000000..d0ee0db9 --- /dev/null +++ b/docs/models/output.md @@ -0,0 +1,29 @@ +# Output + + +## Supported Types + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md new file mode 100644 index 00000000..e5185014 --- /dev/null +++ b/docs/models/outputcontentchunks.md @@ -0,0 +1,41 @@ +# OutputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ConversationThinkChunk` + +```python +value: models.ConversationThinkChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/paginationinfo.md b/docs/models/paginationinfo.md new file mode 100644 index 00000000..ad1fbb86 --- /dev/null +++ b/docs/models/paginationinfo.md @@ -0,0 +1,12 @@ +# PaginationInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `total_items` | *int* | :heavy_check_mark: | N/A | +| `total_pages` | *int* | :heavy_check_mark: | N/A | +| `current_page` | *int* | :heavy_check_mark: | N/A | +| `page_size` | *int* | :heavy_check_mark: | N/A | +| `has_more` | *bool* | 
:heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/prediction.md b/docs/models/prediction.md new file mode 100644 index 00000000..fae3c1ca --- /dev/null +++ b/docs/models/prediction.md @@ -0,0 +1,11 @@ +# Prediction + +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/processingstatusout.md b/docs/models/processingstatusout.md new file mode 100644 index 00000000..bc40d320 --- /dev/null +++ b/docs/models/processingstatusout.md @@ -0,0 +1,10 @@ +# ProcessingStatusOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `process_status` | [models.ProcessStatus](../models/processstatus.md) | :heavy_check_mark: | N/A | +| `processing_status` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/processstatus.md b/docs/models/processstatus.md new file mode 100644 index 00000000..3a9c004e --- /dev/null +++ b/docs/models/processstatus.md @@ -0,0 +1,15 @@ +# ProcessStatus + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `SELF_MANAGED` | self_managed | +| `MISSING_CONTENT` | missing_content | +| `NOOP` | noop | +| `DONE` | done | +| `TODO` | todo | +| `IN_PROGRESS` | in_progress | +| `ERROR` | error | +| `WAITING_FOR_CAPACITY` | waiting_for_capacity | \ No 
newline at end of file diff --git a/docs/models/realtimetranscriptionerror.md b/docs/models/realtimetranscriptionerror.md new file mode 100644 index 00000000..e01f2126 --- /dev/null +++ b/docs/models/realtimetranscriptionerror.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionError + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["error"]]* | :heavy_minus_sign: | N/A | +| `error` | [models.RealtimeTranscriptionErrorDetail](../models/realtimetranscriptionerrordetail.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionerrordetail.md b/docs/models/realtimetranscriptionerrordetail.md new file mode 100644 index 00000000..5b34755d --- /dev/null +++ b/docs/models/realtimetranscriptionerrordetail.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionErrorDetail + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | +| `message` | [models.RealtimeTranscriptionErrorDetailMessage](../models/realtimetranscriptionerrordetailmessage.md) | :heavy_check_mark: | Human-readable error message. | +| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. 
| \ No newline at end of file diff --git a/docs/models/realtimetranscriptionerrordetailmessage.md b/docs/models/realtimetranscriptionerrordetailmessage.md new file mode 100644 index 00000000..da3764ef --- /dev/null +++ b/docs/models/realtimetranscriptionerrordetailmessage.md @@ -0,0 +1,19 @@ +# RealtimeTranscriptionErrorDetailMessage + +Human-readable error message. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + diff --git a/docs/models/realtimetranscriptioninputaudioappend.md b/docs/models/realtimetranscriptioninputaudioappend.md new file mode 100644 index 00000000..5ee365eb --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioappend.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionInputAudioAppend + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["input_audio.append"]]* | :heavy_minus_sign: | N/A | +| `audio` | *str* | :heavy_check_mark: | Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes. 
| \ No newline at end of file diff --git a/docs/models/realtimetranscriptioninputaudioend.md b/docs/models/realtimetranscriptioninputaudioend.md new file mode 100644 index 00000000..393d208c --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioend.md @@ -0,0 +1,8 @@ +# RealtimeTranscriptionInputAudioEnd + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `type` | *Optional[Literal["input_audio.end"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptioninputaudioflush.md b/docs/models/realtimetranscriptioninputaudioflush.md new file mode 100644 index 00000000..367725ba --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioflush.md @@ -0,0 +1,8 @@ +# RealtimeTranscriptionInputAudioFlush + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `type` | *Optional[Literal["input_audio.flush"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsession.md b/docs/models/realtimetranscriptionsession.md new file mode 100644 index 00000000..750bd7f7 --- /dev/null +++ b/docs/models/realtimetranscriptionsession.md @@ -0,0 +1,11 @@ +# RealtimeTranscriptionSession + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `request_id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | 
N/A | +| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessioncreated.md b/docs/models/realtimetranscriptionsessioncreated.md new file mode 100644 index 00000000..34e603fd --- /dev/null +++ b/docs/models/realtimetranscriptionsessioncreated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionCreated + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.created"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessionupdated.md b/docs/models/realtimetranscriptionsessionupdated.md new file mode 100644 index 00000000..7e271995 --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdated + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.updated"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/realtimetranscriptionsessionupdatemessage.md b/docs/models/realtimetranscriptionsessionupdatemessage.md new file mode 100644 index 00000000..2a50ca92 --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdatemessage.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdateMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.update"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSessionUpdatePayload](../models/realtimetranscriptionsessionupdatepayload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessionupdatepayload.md b/docs/models/realtimetranscriptionsessionupdatepayload.md new file mode 100644 index 00000000..d6c6547d --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdatepayload.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdatePayload + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `audio_format` | [OptionalNullable[models.AudioFormat]](../models/audioformat.md) | :heavy_minus_sign: | Set before sending audio. Audio format updates are rejected after audio starts. 
| +| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | Set before sending audio. Streaming delay updates are rejected after audio starts. | \ No newline at end of file diff --git a/docs/models/referencechunk.md b/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/requestsource.md b/docs/models/requestsource.md new file mode 100644 index 00000000..c81c1159 --- /dev/null +++ b/docs/models/requestsource.md @@ -0,0 +1,10 @@ +# RequestSource + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `API` | api | +| `PLAYGROUND` | playground | +| `AGENT_BUILDER_V1` | agent_builder_v1 | \ No newline at end of file diff --git a/docs/models/response.md b/docs/models/response.md new file mode 100644 index 00000000..ff679257 --- /dev/null +++ b/docs/models/response.md @@ -0,0 +1,17 @@ +# Response + + +## Supported Types + +### `models.ClassifierFineTuningJob` + +```python +value: models.ClassifierFineTuningJob = /* values here */ +``` + +### `models.CompletionFineTuningJob` + +```python +value: models.CompletionFineTuningJob = /* values here */ +``` + diff --git a/docs/models/responsedoneevent.md b/docs/models/responsedoneevent.md new file mode 100644 index 00000000..63d4cc06 --- /dev/null +++ b/docs/models/responsedoneevent.md @@ -0,0 +1,10 @@ +# ResponseDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responseerrorevent.md b/docs/models/responseerrorevent.md new file mode 100644 index 00000000..4309bdad --- /dev/null +++ b/docs/models/responseerrorevent.md @@ -0,0 +1,11 @@ +# ResponseErrorEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.error"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `message` | *str* | :heavy_check_mark: | N/A | +| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md new file mode 100644 index 00000000..5cab22f2 --- /dev/null +++ b/docs/models/responseformat.md @@ -0,0 +1,11 @@ +# ResponseFormat + +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md new file mode 100644 index 00000000..2f5f1e55 --- /dev/null +++ b/docs/models/responseformats.md @@ -0,0 +1,10 @@ +# ResponseFormats + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/docs/models/responseretrievemodelv1modelsmodelidget.md b/docs/models/responseretrievemodelv1modelsmodelidget.md new file mode 100644 index 00000000..ffbc1473 --- /dev/null +++ b/docs/models/responseretrievemodelv1modelsmodelidget.md @@ -0,0 +1,19 @@ +# ResponseRetrieveModelV1ModelsModelIDGet + +Successful Response + + +## Supported Types + +### `models.BaseModelCard` + +```python +value: models.BaseModelCard = /* values here */ +``` + +### `models.FTModelCard` + +```python +value: models.FTModelCard = /* values here */ +``` + diff --git a/docs/models/responsestartedevent.md b/docs/models/responsestartedevent.md new file mode 100644 index 00000000..e2f421af --- /dev/null +++ b/docs/models/responsestartedevent.md @@ -0,0 +1,10 @@ +# 
ResponseStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsev1conversationsget.md b/docs/models/responsev1conversationsget.md new file mode 100644 index 00000000..844c5d61 --- /dev/null +++ b/docs/models/responsev1conversationsget.md @@ -0,0 +1,19 @@ +# ResponseV1ConversationsGet + +Successful Response + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff --git a/docs/models/retrievemodelv1modelsmodelidgetrequest.md b/docs/models/retrievemodelv1modelsmodelidgetrequest.md new file mode 100644 index 00000000..f1280f88 --- /dev/null +++ b/docs/models/retrievemodelv1modelsmodelidgetrequest.md @@ -0,0 +1,8 @@ +# RetrieveModelV1ModelsModelIDGetRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to retrieve. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/role.md b/docs/models/role.md new file mode 100644 index 00000000..853c6257 --- /dev/null +++ b/docs/models/role.md @@ -0,0 +1,9 @@ +# Role + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | +| `USER` | user | \ No newline at end of file diff --git a/docs/models/sampletype.md b/docs/models/sampletype.md new file mode 100644 index 00000000..34a6a012 --- /dev/null +++ b/docs/models/sampletype.md @@ -0,0 +1,12 @@ +# SampleType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `PRETRAIN` | pretrain | +| `INSTRUCT` | instruct | +| `BATCH_REQUEST` | batch_request | +| `BATCH_RESULT` | batch_result | +| `BATCH_ERROR` | batch_error | \ No newline at end of file diff --git a/docs/models/security.md b/docs/models/security.md new file mode 100644 index 00000000..2e0839d0 --- /dev/null +++ b/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shareenum.md b/docs/models/shareenum.md new file mode 100644 index 00000000..dc5d2b68 --- /dev/null +++ b/docs/models/shareenum.md @@ -0,0 +1,9 @@ +# ShareEnum + + +## Values + +| Name | Value | +| -------- | -------- | +| `VIEWER` | Viewer | +| `EDITOR` | Editor | \ No newline at end of file diff --git a/docs/models/sharingdelete.md b/docs/models/sharingdelete.md new file mode 100644 index 00000000..1dcec095 --- /dev/null +++ b/docs/models/sharingdelete.md @@ -0,0 +1,10 @@ +# SharingDelete + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | +| `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | \ No newline at end of file diff --git a/docs/models/sharingin.md b/docs/models/sharingin.md new file mode 100644 index 00000000..bac18c8d --- /dev/null +++ b/docs/models/sharingin.md @@ -0,0 +1,11 @@ +# SharingIn + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `level` | [models.ShareEnum](../models/shareenum.md) | :heavy_check_mark: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | +| `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. 
| \ No newline at end of file diff --git a/docs/models/sharingout.md b/docs/models/sharingout.md new file mode 100644 index 00000000..35aeff43 --- /dev/null +++ b/docs/models/sharingout.md @@ -0,0 +1,13 @@ +# SharingOut + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `user_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `org_id` | *str* | :heavy_check_mark: | N/A | +| `role` | *str* | :heavy_check_mark: | N/A | +| `share_with_type` | *str* | :heavy_check_mark: | N/A | +| `share_with_uuid` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/source.md b/docs/models/source.md new file mode 100644 index 00000000..bb1ed612 --- /dev/null +++ b/docs/models/source.md @@ -0,0 +1,10 @@ +# Source + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `UPLOAD` | upload | +| `REPOSITORY` | repository | +| `MISTRAL` | mistral | \ No newline at end of file diff --git a/docs/models/ssetypes.md b/docs/models/ssetypes.md new file mode 100644 index 00000000..ae06b5e8 --- /dev/null +++ b/docs/models/ssetypes.md @@ -0,0 +1,19 @@ +# SSETypes + +Server side events sent when streaming a conversation response. 
+ + +## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | +| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | +| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | +| `MESSAGE_OUTPUT_DELTA` | message.output.delta | +| `TOOL_EXECUTION_STARTED` | tool.execution.started | +| `TOOL_EXECUTION_DELTA` | tool.execution.delta | +| `TOOL_EXECUTION_DONE` | tool.execution.done | +| `AGENT_HANDOFF_STARTED` | agent.handoff.started | +| `AGENT_HANDOFF_DONE` | agent.handoff.done | +| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md new file mode 100644 index 00000000..10bda10f --- /dev/null +++ b/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/systemmessagecontent.md b/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..0c87baf3 --- /dev/null +++ b/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.SystemMessageContentChunks]` + +```python +value: List[models.SystemMessageContentChunks] = /* values here */ +``` + diff --git a/docs/models/systemmessagecontentchunks.md b/docs/models/systemmessagecontentchunks.md new file mode 100644 index 
00000000..40030c17 --- /dev/null +++ b/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/models/tableformat.md b/docs/models/tableformat.md new file mode 100644 index 00000000..54f029b8 --- /dev/null +++ b/docs/models/tableformat.md @@ -0,0 +1,9 @@ +# TableFormat + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/textchunk.md b/docs/models/textchunk.md new file mode 100644 index 00000000..df0e61c3 --- /dev/null +++ b/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["text"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/thinkchunk.md b/docs/models/thinkchunk.md new file mode 100644 index 00000000..70c0369f --- /dev/null +++ b/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | +| `thinking` | List[[models.ThinkChunkThinking](../models/thinkchunkthinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the 
thinking chunk is closed or not. Currently only used for prefixing. | \ No newline at end of file diff --git a/docs/models/thinkchunkthinking.md b/docs/models/thinkchunkthinking.md new file mode 100644 index 00000000..dd1ecca1 --- /dev/null +++ b/docs/models/thinkchunkthinking.md @@ -0,0 +1,17 @@ +# ThinkChunkThinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/docs/models/timestampgranularity.md b/docs/models/timestampgranularity.md new file mode 100644 index 00000000..d20012ea --- /dev/null +++ b/docs/models/timestampgranularity.md @@ -0,0 +1,9 @@ +# TimestampGranularity + + +## Values + +| Name | Value | +| --------- | --------- | +| `SEGMENT` | segment | +| `WORD` | word | \ No newline at end of file diff --git a/docs/models/tool.md b/docs/models/tool.md new file mode 100644 index 00000000..fb661f72 --- /dev/null +++ b/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md new file mode 100644 index 00000000..3819236b --- /dev/null +++ b/docs/models/toolcall.md @@ -0,0 +1,11 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | 
---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolcallconfirmation.md b/docs/models/toolcallconfirmation.md new file mode 100644 index 00000000..1812f7d6 --- /dev/null +++ b/docs/models/toolcallconfirmation.md @@ -0,0 +1,9 @@ +# ToolCallConfirmation + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `confirmation` | [models.Confirmation](../models/confirmation.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolchoice.md b/docs/models/toolchoice.md new file mode 100644 index 00000000..373046bb --- /dev/null +++ b/docs/models/toolchoice.md @@ -0,0 +1,11 @@ +# ToolChoice + +ToolChoice is either a ToolChoiceEnum or a ToolChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git 
a/docs/models/toolchoiceenum.md b/docs/models/toolchoiceenum.md new file mode 100644 index 00000000..0be3d6c5 --- /dev/null +++ b/docs/models/toolchoiceenum.md @@ -0,0 +1,11 @@ +# ToolChoiceEnum + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | +| `REQUIRED` | required | \ No newline at end of file diff --git a/docs/models/toolconfiguration.md b/docs/models/toolconfiguration.md new file mode 100644 index 00000000..89286a17 --- /dev/null +++ b/docs/models/toolconfiguration.md @@ -0,0 +1,10 @@ +# ToolConfiguration + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `exclude` | List[*str*] | :heavy_minus_sign: | N/A | +| `include` | List[*str*] | :heavy_minus_sign: | N/A | +| `requires_confirmation` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md new file mode 100644 index 00000000..7066f348 --- /dev/null +++ b/docs/models/toolexecutiondeltaevent.md @@ -0,0 +1,13 @@ +# ToolExecutionDeltaEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `type` | *Literal["tool.execution.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | 
[models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaeventname.md b/docs/models/toolexecutiondeltaeventname.md new file mode 100644 index 00000000..9c3edef8 --- /dev/null +++ b/docs/models/toolexecutiondeltaeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDeltaEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md new file mode 100644 index 00000000..b2d81be3 --- /dev/null +++ b/docs/models/toolexecutiondoneevent.md @@ -0,0 +1,13 @@ +# ToolExecutionDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | *Literal["tool.execution.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneeventname.md b/docs/models/toolexecutiondoneeventname.md new file mode 100644 index 00000000..6449079d --- /dev/null +++ 
b/docs/models/toolexecutiondoneeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDoneEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md new file mode 100644 index 00000000..03316381 --- /dev/null +++ b/docs/models/toolexecutionentry.md @@ -0,0 +1,17 @@ +# ToolExecutionEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["tool.execution"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | [models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionentryname.md b/docs/models/toolexecutionentryname.md new file mode 100644 index 00000000..fb762a53 --- /dev/null +++ b/docs/models/toolexecutionentryname.md @@ -0,0 +1,17 @@ +# 
ToolExecutionEntryName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md new file mode 100644 index 00000000..189b8a3d --- /dev/null +++ b/docs/models/toolexecutionstartedevent.md @@ -0,0 +1,15 @@ +# ToolExecutionStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `type` | *Literal["tool.execution.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedeventname.md b/docs/models/toolexecutionstartedeventname.md new file mode 100644 index 00000000..3308c483 --- /dev/null +++ b/docs/models/toolexecutionstartedeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionStartedEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values 
here */ +``` + diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md new file mode 100644 index 00000000..d6002175 --- /dev/null +++ b/docs/models/toolfilechunk.md @@ -0,0 +1,12 @@ +# ToolFileChunk + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `type` | *Optional[Literal["tool_file"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolFileChunkTool](../models/toolfilechunktool.md) | :heavy_check_mark: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunktool.md b/docs/models/toolfilechunktool.md new file mode 100644 index 00000000..aa5ac8a9 --- /dev/null +++ b/docs/models/toolfilechunktool.md @@ -0,0 +1,17 @@ +# ToolFileChunkTool + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md new file mode 100644 index 00000000..7201481e --- /dev/null +++ b/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | +| `content` | 
[Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolmessagecontent.md b/docs/models/toolmessagecontent.md new file mode 100644 index 00000000..5c76091f --- /dev/null +++ b/docs/models/toolmessagecontent.md @@ -0,0 +1,17 @@ +# ToolMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md new file mode 100644 index 00000000..49ea4ca7 --- /dev/null +++ b/docs/models/toolreferencechunk.md @@ -0,0 +1,13 @@ +# ToolReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Optional[Literal["tool_reference"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolReferenceChunkTool](../models/toolreferencechunktool.md) | :heavy_check_mark: | N/A | +| `title` | *str* | :heavy_check_mark: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunktool.md b/docs/models/toolreferencechunktool.md new file mode 100644 index 00000000..999f7c34 --- /dev/null +++ b/docs/models/toolreferencechunktool.md @@ -0,0 +1,17 @@ +# ToolReferenceChunkTool + + +## Supported Types + +### 
`models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/tooltypes.md b/docs/models/tooltypes.md new file mode 100644 index 00000000..84e49253 --- /dev/null +++ b/docs/models/tooltypes.md @@ -0,0 +1,8 @@ +# ToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/trainingfile.md b/docs/models/trainingfile.md new file mode 100644 index 00000000..cde218bb --- /dev/null +++ b/docs/models/trainingfile.md @@ -0,0 +1,9 @@ +# TrainingFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionresponse.md b/docs/models/transcriptionresponse.md new file mode 100644 index 00000000..1bc0189c --- /dev/null +++ b/docs/models/transcriptionresponse.md @@ -0,0 +1,13 @@ +# TranscriptionResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | 
:heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md new file mode 100644 index 00000000..d7672c0e --- /dev/null +++ b/docs/models/transcriptionsegmentchunk.md @@ -0,0 +1,14 @@ +# TranscriptionSegmentChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `type` | *Optional[Literal["transcription_segment"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdone.md b/docs/models/transcriptionstreamdone.md new file mode 100644 index 00000000..bca69a2b --- /dev/null +++ b/docs/models/transcriptionstreamdone.md @@ -0,0 +1,14 @@ +# TranscriptionStreamDone + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `type` | 
*Literal["transcription.done"]* | :heavy_check_mark: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamevents.md b/docs/models/transcriptionstreamevents.md new file mode 100644 index 00000000..f760385d --- /dev/null +++ b/docs/models/transcriptionstreamevents.md @@ -0,0 +1,9 @@ +# TranscriptionStreamEvents + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `event` | [models.TranscriptionStreamEventTypes](../models/transcriptionstreameventtypes.md) | :heavy_check_mark: | N/A | +| `data` | [models.TranscriptionStreamEventsData](../models/transcriptionstreameventsdata.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreameventsdata.md b/docs/models/transcriptionstreameventsdata.md new file mode 100644 index 00000000..eea8e928 --- /dev/null +++ b/docs/models/transcriptionstreameventsdata.md @@ -0,0 +1,29 @@ +# TranscriptionStreamEventsData + + +## Supported Types + +### `models.TranscriptionStreamDone` + +```python +value: models.TranscriptionStreamDone = /* values here */ +``` + +### `models.TranscriptionStreamLanguage` + +```python +value: models.TranscriptionStreamLanguage = /* values here */ +``` + +### `models.TranscriptionStreamSegmentDelta` + +```python +value: models.TranscriptionStreamSegmentDelta = /* values here */ +``` + +### `models.TranscriptionStreamTextDelta` + +```python +value: models.TranscriptionStreamTextDelta = /* values here */ +``` + diff --git 
a/docs/models/transcriptionstreameventtypes.md b/docs/models/transcriptionstreameventtypes.md new file mode 100644 index 00000000..e4eb25a6 --- /dev/null +++ b/docs/models/transcriptionstreameventtypes.md @@ -0,0 +1,11 @@ +# TranscriptionStreamEventTypes + + +## Values + +| Name | Value | +| -------------------------- | -------------------------- | +| `TRANSCRIPTION_LANGUAGE` | transcription.language | +| `TRANSCRIPTION_SEGMENT` | transcription.segment | +| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta | +| `TRANSCRIPTION_DONE` | transcription.done | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguage.md b/docs/models/transcriptionstreamlanguage.md new file mode 100644 index 00000000..63fcfbc6 --- /dev/null +++ b/docs/models/transcriptionstreamlanguage.md @@ -0,0 +1,10 @@ +# TranscriptionStreamLanguage + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Literal["transcription.language"]* | :heavy_check_mark: | N/A | +| `audio_language` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md new file mode 100644 index 00000000..1b652a3b --- /dev/null +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -0,0 +1,13 @@ +# TranscriptionStreamSegmentDelta + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- | +| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | 
N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdelta.md b/docs/models/transcriptionstreamtextdelta.md new file mode 100644 index 00000000..77bd0ddc --- /dev/null +++ b/docs/models/transcriptionstreamtextdelta.md @@ -0,0 +1,10 @@ +# TranscriptionStreamTextDelta + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------- | ------------------------------------- | ------------------------------------- | ------------------------------------- | +| `type` | *Literal["transcription.text.delta"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/unarchivemodelresponse.md b/docs/models/unarchivemodelresponse.md new file mode 100644 index 00000000..375962a7 --- /dev/null +++ b/docs/models/unarchivemodelresponse.md @@ -0,0 +1,10 @@ +# UnarchiveModelResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updateagentrequest.md b/docs/models/updateagentrequest.md new file mode 100644 index 00000000..d3428d92 --- /dev/null +++ b/docs/models/updateagentrequest.md @@ -0,0 +1,17 @@ +# UpdateAgentRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | 
-------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.UpdateAgentRequestTool](../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updateagentrequesttool.md b/docs/models/updateagentrequesttool.md new file mode 100644 index 00000000..e358b1ed --- /dev/null +++ b/docs/models/updateagentrequesttool.md @@ -0,0 +1,41 @@ +# UpdateAgentRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: 
models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/updatedocumentrequest.md b/docs/models/updatedocumentrequest.md new file mode 100644 index 00000000..7e0b41b7 --- /dev/null +++ b/docs/models/updatedocumentrequest.md @@ -0,0 +1,9 @@ +# UpdateDocumentRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../models/attributes.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatelibraryrequest.md b/docs/models/updatelibraryrequest.md new file mode 100644 index 00000000..aaffc5a9 --- /dev/null +++ b/docs/models/updatelibraryrequest.md @@ -0,0 +1,9 @@ +# UpdateLibraryRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatemodelrequest.md b/docs/models/updatemodelrequest.md new file mode 100644 index 00000000..56b84c59 --- /dev/null +++ b/docs/models/updatemodelrequest.md @@ -0,0 +1,9 @@ +# UpdateModelRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/usageinfo.md b/docs/models/usageinfo.md new file mode 100644 index 
00000000..f5204ac9 --- /dev/null +++ b/docs/models/usageinfo.md @@ -0,0 +1,12 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md new file mode 100644 index 00000000..e7a932ed --- /dev/null +++ b/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/usermessagecontent.md b/docs/models/usermessagecontent.md new file mode 100644 index 00000000..8350f9e8 --- /dev/null +++ b/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/utils/retryconfig.md b/docs/models/utils/retryconfig.md new file mode 100644 index 00000000..69dd549e --- /dev/null +++ b/docs/models/utils/retryconfig.md @@ -0,0 
+1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. + +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. 
| `300000` | \ No newline at end of file diff --git a/docs/models/validationerror.md b/docs/models/validationerror.md new file mode 100644 index 00000000..7a1654a1 --- /dev/null +++ b/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md new file mode 100644 index 00000000..c73952d9 --- /dev/null +++ b/docs/models/wandbintegration.md @@ -0,0 +1,12 @@ +# WandbIntegration + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["wandb"]* | :heavy_check_mark: | N/A | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. 
| +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationresult.md b/docs/models/wandbintegrationresult.md new file mode 100644 index 00000000..d12bc311 --- /dev/null +++ b/docs/models/wandbintegrationresult.md @@ -0,0 +1,12 @@ +# WandbIntegrationResult + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["wandb"]* | :heavy_check_mark: | N/A | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. 
| +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md new file mode 100644 index 00000000..78b736cd --- /dev/null +++ b/docs/models/websearchpremiumtool.md @@ -0,0 +1,9 @@ +# WebSearchPremiumTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md new file mode 100644 index 00000000..4ca7333c --- /dev/null +++ b/docs/models/websearchtool.md @@ -0,0 +1,9 @@ +# WebSearchTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md new file mode 100644 index 00000000..c50456df --- /dev/null +++ b/docs/sdks/accesses/README.md @@ -0,0 +1,141 @@ +# 
Beta.Libraries.Accesses + +## Overview + +(beta) Libraries API - manage access to a library. + +### Available Operations + +* [list](#list) - List all of the access to this library. +* [update_or_create](#update_or_create) - Create or update an access level. +* [delete](#delete) - Delete an access level. + +## list + +Given a library, list all of the entities that have access and to what level. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( +    api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + +    res = mistral.beta.libraries.accesses.list(library_id="d2169833-d8e2-416e-a372-76518d3d99c2") + +    # Handle response +    print(res) + +``` + +### Parameters + +| Parameter                                                           | Type                                                                | Required                                                            | Description                                                         | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id`                                                        | *str*                                                               | :heavy_check_mark:                                                  | N/A                                                                 | +| `retries`                                                           | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md)    | :heavy_minus_sign:                                                  | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListSharingOut](../../models/listsharingout.md)** + +### Errors + +| Error Type                 | Status Code                | Content Type               | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422                        | application/json           | +| errors.SDKError            | 4XX, 5XX                   | \*/\*                      | + +## update_or_create + +Given a library id, you can create or update the access level of an entity. You have to be the owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( +    api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + +    res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") + +    # Handle response +    print(res) + +``` + +### Parameters + +| Parameter                                                            | Type                                                                 | Required                                                             | Description                                                          | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `library_id`                                                         | *str*                                                                | :heavy_check_mark:                                                   | N/A                                                                  | +| `level`                                                              | [models.ShareEnum](../../models/shareenum.md)                        | :heavy_check_mark:                                                   | N/A                                                                  | +| `share_with_uuid`                                                    | *str*                                                                | :heavy_check_mark:                                                   | The id of the entity (user, workspace or organization) to share with | +| `share_with_type`                                                    | [models.EntityType](../../models/entitytype.md)                      | :heavy_check_mark:                                                   | The type of entity, used to share a library.                         | +| `org_id`                                                             | *OptionalNullable[str]*                                              | :heavy_minus_sign:                                                   | N/A                                                                  | +| `retries`                                                            | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md)     | :heavy_minus_sign:                                                   | Configuration to override the default retry behavior of the client.  | + +### Response + +**[models.SharingOut](../../models/sharingout.md)** + +### Errors + +| Error Type                 | Status Code                | Content Type               | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422                        | application/json           | +| errors.SDKError            | 4XX, 5XX                   | \*/\*                      | + +## delete + +Given a library id, you can delete the access level of an entity. An owner cannot delete its own access. You have to be the owner of the library to delete an access other than yours. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | +| `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.SharingOut](../../models/sharingout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md new file mode 100644 index 00000000..8a608370 --- /dev/null +++ b/docs/sdks/agents/README.md @@ -0,0 +1,138 @@ +# Agents + +## Overview + +Agents API. 
+ +### Available Operations + +* [complete](#complete) - Agents Completion +* [stream](#stream) - Stream Agents completion + +## complete + +Agents Completion + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.agents.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], agent_id="", stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessage](../../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.agents.stream(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], agent_id="", stream=True, response_format={ + "type": "text", + }) + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[Union[eventstreaming.EventStream[models.CompletionEvent], eventstreaming.EventStreamAsync[models.CompletionEvent]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/batchjobs/README.md b/docs/sdks/batchjobs/README.md new file mode 100644 index 00000000..3633fe4e --- /dev/null +++ b/docs/sdks/batchjobs/README.md @@ -0,0 +1,188 @@ +# Batch.Jobs + +## Overview + +### Available Operations + +* [list](#list) - Get Batch Jobs +* [create](#create) - Create Batch Job +* [get](#get) - Get Batch Job +* [cancel](#cancel) - Cancel Batch Job + +## list + +Get a list of batch jobs for your organization and user. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False, order_by="-created") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | List[[models.BatchJobStatus](../../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../../models/orderby.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListBatchJobsResponse](../../models/listbatchjobsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new batch job, it will be queued for processing. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.create(endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `requests` | List[[models.BatchRequest](../../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest
**Example 2:** mistral-medium-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.BatchJob](../../models/batchjob.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Get a batch job details by its UUID. + +Args: + inline: If True, return results inline in the response. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `inline` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.BatchJob](../../models/batchjob.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## cancel + +Request the cancellation of a batch job. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJob](../../models/batchjob.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/betaagents/README.md b/docs/sdks/betaagents/README.md new file mode 100644 index 00000000..aaa5110e --- /dev/null +++ b/docs/sdks/betaagents/README.md @@ -0,0 +1,501 @@ +# Beta.Agents + +## Overview + +(beta) Agents API + +### Available Operations + +* [create](#create) - Create a agent that can be used within a conversation. +* [list](#list) - List agent entities. +* [get](#get) - Retrieve an agent entity. +* [update](#update) - Update an agent entity. +* [delete](#delete) - Delete an agent entity. +* [update_version](#update_version) - Update an agent version. 
+* [list_versions](#list_versions) - List all versions of an agent. +* [get_version](#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](#delete_version_alias) - Delete an agent version alias. + +## create + +Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ + "response_format": { + "type": "text", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +Retrieve a list of agent entities sorted by creation time. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list(page=0, page_size=20) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `sources` | List[[models.RequestSource](../../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | 
Filter by agent name | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.Agent]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.get(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default 
retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Update an agent attributes and create a new version. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update(agent_id="", completion_args={ + "response_format": { + "type": "text", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.UpdateAgentRequestTool](../../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete an agent entity. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete(agent_id="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update_version + +Switch the version of an agent. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update_version(agent_id="", version=157995) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list_versions + +Retrieve all versions for a specific agent with full agent context. Supports pagination. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list_versions(agent_id="", page=0, page_size=20) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of versions per page | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.Agent]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get_version + +Get a specific agent version by version number. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.get_version(agent_id="", version="788393") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create_version_alias + +Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.AgentAliasResponse](../../models/agentaliasresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list_version_aliases + +Retrieve all version aliases for a specific agent. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list_version_aliases(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.AgentAliasResponse]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete_version_alias + +Delete an existing alias for an agent. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete_version_alias(agent_id="", alias="") + + # Use the SDK ... 
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md new file mode 100644 index 00000000..1bf4aead --- /dev/null +++ b/docs/sdks/chat/README.md @@ -0,0 +1,144 @@ +# Chat + +## Overview + +Chat Completion API. + +### Available Operations + +* [complete](#complete) - Chat Completion +* [stream](#stream) - Stream chat completion + +## complete + +Chat Completion + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.chat.complete(model="mistral-large-latest", messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], stream=False, response_format={ + "type": "text", + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.ChatCompletionRequestMessage](../../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.chat.stream(model="mistral-large-latest", messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], stream=True, response_format={ + "type": "text", + }) + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[Union[eventstreaming.EventStream[models.CompletionEvent], eventstreaming.EventStreamAsync[models.CompletionEvent]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md new file mode 100644 index 00000000..dc0f4984 --- /dev/null +++ b/docs/sdks/classifiers/README.md @@ -0,0 +1,198 @@ +# Classifiers + +## Overview + +Classifiers API. 
+ +### Available Operations + +* [moderate](#moderate) - Moderations +* [moderate_chat](#moderate_chat) - Chat Moderations +* [classify](#classify) - Classifications +* [classify_chat](#classify_chat) - Chat Classifications + +## moderate + +Moderations + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.moderate(model="mistral-moderation-latest", inputs="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[models.ModerationResponse](../../models/moderationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## moderate_chat + +Chat Moderations + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.moderate_chat(inputs=[ + { + "role": "tool", + "content": "", + }, + ], model="LeBaron") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ModerationResponse](../../models/moderationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## classify + +Classifications + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.classify(model="mistral-moderation-latest", inputs=[ + "", + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## classify_chat + +Chat Classifications + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.classify_chat(model="Camry", input=[ + { + "messages": [ + { + "role": "system", + "content": "", + }, + ], + }, + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `input` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md new file mode 100644 index 00000000..e77d329b --- /dev/null +++ b/docs/sdks/conversations/README.md @@ -0,0 +1,556 @@ +# Beta.Conversations + +## Overview + +(beta) Conversations API + +### Available Operations + +* [start](#start) - Create a conversation and append entries to it. +* [list](#list) - List all created conversations. +* [get](#get) - Retrieve conversation information. +* [delete](#delete) - Delete a conversation. +* [append](#append) - Append new entries to an existing conversation. +* [get_history](#get_history) - Retrieve all entries in a conversation. +* [get_messages](#get_messages) - Retrieve all messages in a conversation. +* [restart](#restart) - Restart a conversation starting from a given entry. +* [start_stream](#start_stream) - Create a conversation and append entries to it. +* [append_stream](#append_stream) - Append new entries to an existing conversation. +* [restart_stream](#restart_stream) - Restart a conversation starting from a given entry. + +## start + +Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.start(inputs="", stream=False, completion_args={ + "response_format": { + "type": "text", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +Retrieve a list of conversation entities sorted by creation time. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.list(page=0, page_size=100) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given a conversation_id retrieve a conversation entity with its attributes. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ResponseV1ConversationsGet](../../models/responsev1conversationsget.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a conversation given a conversation_id. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.conversations.delete(conversation_id="") + + # Use the SDK ... 
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## append + +Run completion on the history of the conversation and the user entries. Return the new created entries. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.append(conversation_id="", stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get_history + +Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get_history(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConversationHistory](../../models/conversationhistory.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get_messages + +Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get_messages(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConversationMessages](../../models/conversationmessages.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## restart + +Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.restart(conversation_id="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +
+| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ConversationResponse](../../models/conversationresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## start_stream
+
+Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation.
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTool](../../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## append_stream + +Run completion on the history of the conversation and the user entries. Return the new created entries. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.append_stream(conversation_id="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## restart_stream + +Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.restart_stream(conversation_id="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md new file mode 100644 index 00000000..9c219b67 --- /dev/null +++ b/docs/sdks/documents/README.md @@ -0,0 +1,438 @@ +# Beta.Libraries.Documents + +## Overview + +(beta) Libraries API - manage documents in a library. + +### Available Operations + +* [list](#list) - List documents in a given library. +* [upload](#upload) - Upload a new document. +* [get](#get) - Retrieve the metadata of a specific document. +* [update](#update) - Update the metadata of a specific document. +* [delete](#delete) - Delete a document. +* [text_content](#text_content) - Retrieve the text content of a specific document. +* [status](#status) - Retrieve the processing status of a specific document. +* [get_signed_url](#get_signed_url) - Retrieve the signed URL of a specific document. +* [extracted_text_signed_url](#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. +* [reprocess](#reprocess) - Reprocess a document. 
+
+## list
+
+Given a library, lists the documents that have been uploaded to that library.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+    res = mistral.beta.libraries.documents.list(library_id="5c3ca4cd-62bc-4c71-ad8a-1531ae80d078", page_size=100, page=0, sort_by="created_at", sort_order="desc")
+
+    # Handle response
+    print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `page` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ListDocumentsResponse](../../models/listdocumentsresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## upload
+
+Given a library, upload a new document to that library. It is queued for processing; its status will change once it has been processed.
The processing has to be completed in order to be discoverable for the library search
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+    res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={
+        "file_name": "example.file",
+        "content": open("example.file", "rb"),
+    })
+
+    # Handle response
+    print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Document](../../models/document.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given a library and a document in this library, you can retrieve the metadata of that document. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.get(library_id="03d908c8-90a1-44fd-bf3a-8490fb7c9a03", document_id="90973aec-0508-4375-8b00-91d732414745") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.Document](../../models/document.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Given a library and a document in that library, update the name of that document. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.update(library_id="3ddd8d93-dca5-4a6d-980d-173226c35742", document_id="2a25e44c-b160-40ca-b5c2-b65fb2fcae34") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../../models/attributes.md)] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Document](../../models/document.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Given a library and a document in that library, delete that document. 
The document will be deleted from the library and the search index. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.libraries.documents.delete(library_id="005daae9-d42e-407d-82d7-2261c6a1496c", document_id="edc236b0-baff-49a9-884b-4ca36a258da4") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## text_content + +Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.text_content(library_id="1d177215-3b6b-45ba-9fa9-baf773223bec", document_id="60214c91-2aba-4692-a4e6-a53365de8caf") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.DocumentTextContent](../../models/documenttextcontent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## status + +Given a library and a document in that library, retrieve the processing status of that document. 
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+    res = mistral.beta.libraries.documents.status(library_id="e6906f70-368f-4155-80da-c1718f01bc43", document_id="2c904915-d831-4e9d-a345-8ce405bcef66")
+
+    # Handle response
+    print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `document_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ProcessingStatusOut](../../models/processingstatusout.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_signed_url
+
+Given a library and a document in that library, retrieve the signed URL of a specific document. The url will expire after 30 minutes and can be accessed by anyone with the link.
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.get_signed_url(library_id="23cf6904-a602-4ee8-9f5b-8efc557c336d", document_id="48598486-df71-4994-acbb-1133c72efa8c") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[str](../../models/responselibrariesdocumentsgetsignedurlv1.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## extracted_text_signed_url + +Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. 
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+    res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="a6f15de3-1e82-4f95-af82-851499042ef8", document_id="9749d4f9-24e5-4ca2-99a3-a406863f805d")
+
+    # Handle response
+    print(res)
+
+```
+
+### Parameters
+
+| Parameter                                                           | Type                                                                | Required                                                            | Description                                                         |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `library_id`                                                        | *str*                                                               | :heavy_check_mark:                                                  | N/A                                                                 |
+| `document_id`                                                       | *str*                                                               | :heavy_check_mark:                                                  | N/A                                                                 |
+| `retries`                                                           | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md)    | :heavy_minus_sign:                                                  | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[str](../../models/responselibrariesdocumentsgetextractedtextsignedurlv1.md)**
+
+### Errors
+
+| Error Type                 | Status Code                | Content Type               |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422                        | application/json           |
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
+
+## reprocess
+
+Given a library and a document in that library, reprocess that document; it will be billed again.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+    mistral.beta.libraries.documents.reprocess(library_id="51b29371-de8f-4ba4-932b-a0bafb3a7f64", document_id="3052422c-49ca-45ac-a918-cadb35d61fd8")
+
+    # Use the SDK ...
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md new file mode 100644 index 00000000..cb207d8b --- /dev/null +++ b/docs/sdks/embeddings/README.md @@ -0,0 +1,58 @@ +# Embeddings + +## Overview + +Embeddings API. 
+ +### Available Operations + +* [create](#create) - Embeddings + +## create + +Embeddings + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.embeddings.create(model="mistral-embed", inputs=[ + "Embed this sentence.", + "As well as this one.", + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.EmbeddingResponse](../../models/embeddingresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md new file mode 100644 index 00000000..9507326b --- /dev/null +++ b/docs/sdks/files/README.md @@ -0,0 +1,270 @@ +# Files + +## Overview + +Files API + +### Available Operations + +* [upload](#upload) - Upload File +* [list](#list) - List Files +* [retrieve](#retrieve) - Retrieve File +* [delete](#delete) - Delete File +* [download](#download) - Download File +* [get_signed_url](#get_signed_url) - Get Signed Url + +## upload + +Upload a file that can be used across various endpoints. + +The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + +Please contact us if you need to increase these storage limits. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.upload(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `purpose` | [Optional[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.CreateFileResponse](../../models/createfileresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +Returns a list of files that belong to the user's organization. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.list(page=0, page_size=100, include_total=True) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `sample_type` | List[[models.SampleType](../../models/sampletype.md)] | :heavy_minus_sign: | N/A | +| `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `purpose` | [OptionalNullable[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry 
behavior of the client. | + +### Response + +**[models.ListFilesResponse](../../models/listfilesresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## retrieve + +Returns information about a specific file. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.retrieve(file_id="f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.GetFileResponse](../../models/getfileresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a file. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.delete(file_id="3b6d45eb-e30b-416f-8019-f47e2e93d930") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.DeleteFileResponse](../../models/deletefileresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## download + +Download a file + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.download(file_id="f8919994-a4a1-46b2-8b5b-06335a4300ce") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[httpx.Response](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get_signed_url + +Get Signed Url + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.files.get_signed_url(file_id="06a020ab-355c-49a6-b19d-304b7c01699f", expiry=24) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `expiry` | *Optional[int]* | :heavy_minus_sign: | Number of hours before the url becomes invalid. Defaults to 24h | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.GetSignedURLResponse](../../models/getsignedurlresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md new file mode 100644 index 00000000..49151bf5 --- /dev/null +++ b/docs/sdks/fim/README.md @@ -0,0 +1,114 @@ +# Fim + +## Overview + +Fill-in-the-middle API. + +### Available Operations + +* [complete](#complete) - Fim Completion +* [stream](#stream) - Stream fim completion + +## complete + +FIM completion. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fim.complete(model="codestral-latest", prompt="def", top_p=1, stream=False, suffix="return a+b") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
| | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fim.stream(model="codestral-latest", prompt="def", top_p=1, stream=True, suffix="return a+b") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
| | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[Union[eventstreaming.EventStream[models.CompletionEvent], eventstreaming.EventStreamAsync[models.CompletionEvent]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/finetuningjobs/README.md b/docs/sdks/finetuningjobs/README.md new file mode 100644 index 00000000..4262b3a9 --- /dev/null +++ b/docs/sdks/finetuningjobs/README.md @@ -0,0 +1,232 @@ +# FineTuning.Jobs + +## Overview + +### Available Operations + +* [list](#list) - Get Fine Tuning Jobs +* [create](#create) - Create Fine Tuning Job +* [get](#get) - Get Fine Tuning Job +* [cancel](#cancel) - Cancel Fine Tuning Job +* [start](#start) - Start Fine Tuning Job + +## list + +Get a list of fine-tuning jobs for your organization and user. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to 
be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ListFineTuningJobsResponse](../../models/listfinetuningjobsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new fine-tuning job, it will be queued for processing. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ + "learning_rate": 0.0001, + }, invalid_sample_skip_percentage=0) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.Hyperparameters](../../models/hyperparameters.md) | :heavy_check_mark: | N/A | +| `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Get a fine-tuned job details by its UUID. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## cancel + +Request the cancellation of a fine tuning job. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## start + +Request the start of a validated fine tuning job. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( +    api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + +    res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") + +    # Handle response +    print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md new file mode 100644 index 00000000..7df1ef4e --- /dev/null +++ b/docs/sdks/libraries/README.md @@ -0,0 +1,220 @@ +# Beta.Libraries + +## Overview + +(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities. + +### Available Operations + +* [list](#list) - List all libraries you have access to. +* [create](#create) - Create a new Library. +* [get](#get) - Detailed information about a specific Library. +* [delete](#delete) - Delete a library and all of its documents. +* [update](#update) - Update a library. + +## list + +List all libraries that you have created or have been shared with you. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListLibrariesResponse](../../models/listlibrariesresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( +    api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + +    res = mistral.beta.libraries.create(name="") + +    # Handle response +    print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `chunk_size` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Library](../../models/library.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given a library id, get detailed information about that Library. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.get(library_id="d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Library](../../models/library.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Given a library id, deletes it together with all documents that have been uploaded to that library. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.delete(library_id="6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Library](../../models/library.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Given a library id, you can update the name and description. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.update(library_id="e01880c3-d0b5-4a29-8b1b-abdb8ce917e4") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Library](../../models/library.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md new file mode 100644 index 00000000..311a2db6 --- /dev/null +++ b/docs/sdks/models/README.md @@ -0,0 +1,257 @@ +# Models + +## Overview + +Model Management API + +### Available Operations + +* [list](#list) - List Models +* [retrieve](#retrieve) - Retrieve Model +* [delete](#delete) - Delete Model +* [update](#update) - Update Fine Tuned Model +* [archive](#archive) - Archive Fine Tuned Model +* [unarchive](#unarchive) - Unarchive Fine Tuned Model + +## list + +List all models available to the user. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ModelList](../../models/modellist.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## retrieve + +Retrieve information about a model. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to retrieve. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.ResponseRetrieveModelV1ModelsModelIDGet](../../models/responseretrievemodelv1modelsmodelidget.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a fine-tuned model. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to delete. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[models.DeleteModelOut](../../models/deletemodelout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Update a model name or description. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse](../../models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## archive + +Archive a fine-tuned model. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to archive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.ArchiveModelResponse](../../models/archivemodelresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## unarchive + +Un-archive a fine-tuned model. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to unarchive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[models.UnarchiveModelResponse](../../models/unarchivemodelresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md new file mode 100644 index 00000000..fde2a823 --- /dev/null +++ b/docs/sdks/ocr/README.md @@ -0,0 +1,69 @@ +# Ocr + +## Overview + +OCR API + +### Available Operations + +* [process](#process) - OCR + +## process + +OCR + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.ocr.process(model="CX-9", document={ + "type": "document_url", + "document_url": "https://upset-labourer.net/", + }, bbox_annotation_format={ + "type": "text", + }, document_annotation_format={ + "type": "text", + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `document` | [models.DocumentUnion](../../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| 
`pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.OCRResponse](../../models/ocrresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md new file mode 100644 index 00000000..97703c9b --- /dev/null +++ b/docs/sdks/transcriptions/README.md @@ -0,0 +1,108 @@ +# Audio.Transcriptions + +## Overview + +API for audio transcription. 
+ +### Available Operations + +* [complete](#complete) - Create Transcription +* [stream](#stream) - Create Streaming Transcription (SSE) + +## complete + +Create Transcription + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.audio.transcriptions.complete(model="voxtral-mini-latest", diarize=False) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + +### Response + +**[models.TranscriptionResponse](../../models/transcriptionresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## stream + +Create Streaming Transcription (SSE) + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.audio.transcriptions.stream(model="Camry", diarize=False) + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.TranscriptionStreamEvents], eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/async_chat_no_streaming.py b/examples/async_chat_no_streaming.py deleted file mode 100644 index 9c8be870..00000000 --- a/examples/async_chat_no_streaming.py +++ /dev/null @@ -1,25 +0,0 @@ -import asyncio -import os - -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage - - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - model = "mistral-tiny" - - client = MistralAsyncClient(api_key=api_key) - - chat_response = await client.chat( - model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - print(chat_response.choices[0].message.content) - - await client.close() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/async_chat_with_streaming.py b/examples/async_chat_with_streaming.py deleted file mode 100644 index e239e039..00000000 --- a/examples/async_chat_with_streaming.py +++ /dev/null @@ -1,28 +0,0 @@ -import asyncio -import os - -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage - - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - model = "mistral-tiny" - - client = MistralAsyncClient(api_key=api_key) - - print("Chat response:") - async for chunk in client.chat_stream( - model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ): - if 
chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") - - print("\n") - - await client.close() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/async_embeddings.py b/examples/async_embeddings.py deleted file mode 100644 index 0abaa134..00000000 --- a/examples/async_embeddings.py +++ /dev/null @@ -1,20 +0,0 @@ -import asyncio -import os - -from mistralai.async_client import MistralAsyncClient - - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = MistralAsyncClient(api_key=api_key) - - embeddings_batch_response = await client.embeddings( - model="mistral-embed", - input=["What is the best French cheese?"] * 10, - ) - print(embeddings_batch_response) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/azure/.env.example b/examples/azure/.env.example new file mode 100644 index 00000000..7467bf2e --- /dev/null +++ b/examples/azure/.env.example @@ -0,0 +1,4 @@ +AZURE_API_KEY=your-azure-api-key +AZURE_ENDPOINT=https://your-endpoint.services.ai.azure.com/models +AZURE_MODEL=your-deployment-name +AZURE_API_VERSION=2024-05-01-preview diff --git a/examples/azure/chat_no_streaming.py b/examples/azure/chat_no_streaming.py new file mode 100644 index 00000000..952b171d --- /dev/null +++ b/examples/azure/chat_no_streaming.py @@ -0,0 +1,22 @@ +import os + +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.models import ChatCompletionRequestMessage, UserMessage + +AZURE_API_KEY = os.environ.get("AZURE_API_KEY", "") +AZURE_ENDPOINT = os.environ.get("AZURE_ENDPOINT", "") +AZURE_MODEL = os.environ.get("AZURE_MODEL", "mistral-small-2503") +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +client = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +messages: 
list[ChatCompletionRequestMessage] = [ + UserMessage(content="What is the capital of France?"), +] +res = client.chat.complete(model=AZURE_MODEL, messages=messages) +print(res.choices[0].message.content) diff --git a/examples/chat_no_streaming.py b/examples/chat_no_streaming.py deleted file mode 100644 index e385773e..00000000 --- a/examples/chat_no_streaming.py +++ /dev/null @@ -1,21 +0,0 @@ -import os - -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage - - -def main(): - api_key = os.environ["MISTRAL_API_KEY"] - model = "mistral-tiny" - - client = MistralClient(api_key=api_key) - - chat_response = client.chat( - model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - print(chat_response.choices[0].message.content) - - -if __name__ == "__main__": - main() diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py deleted file mode 100644 index 21a12c1e..00000000 --- a/examples/chat_with_streaming.py +++ /dev/null @@ -1,22 +0,0 @@ -import os - -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage - - -def main(): - api_key = os.environ["MISTRAL_API_KEY"] - model = "mistral-tiny" - - client = MistralClient(api_key=api_key) - - for chunk in client.chat_stream( - model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ): - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") - - -if __name__ == "__main__": - main() diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py deleted file mode 100644 index dd94b811..00000000 --- a/examples/chatbot_with_streaming.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/env python - -# Simple chatbot example -- run with -h argument to see options. 
- -import argparse -import logging -import os -import sys - -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage - -MODEL_LIST = [ - "mistral-tiny", - "mistral-small", - "mistral-medium", -] -DEFAULT_MODEL = "mistral-small" - -LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" - -logger = logging.getLogger("chatbot") - - -class ChatBot: - def __init__(self, api_key, model, system_message=None): - self.client = MistralClient(api_key=api_key) - self.model = model - self.system_message = system_message - - def opening_instructions(self): - print(""" -To chat: type your message and hit enter -To start a new chat: type /new -To exit: type /exit, /quit, or hit CTRL+C -""") - - def new_chat(self): - self.messages = [] - if self.system_message: - self.messages.append(ChatMessage(role="system", content=self.system_message)) - - def check_exit(self, content): - if content.lower().strip() in ["/exit", "/quit"]: - self.exit() - - def check_new_chat(self, content): - if content.lower().strip() in ["/new"]: - print("") - print("Starting new chat...") - print("") - self.new_chat() - return True - return False - - def run_inference(self, content): - self.messages.append(ChatMessage(role="user", content=content)) - - assistant_response = "" - logger.debug(f"Sending messages: {self.messages}") - for chunk in self.client.chat_stream(model=self.model, messages=self.messages): - response = chunk.choices[0].delta.content - if response is not None: - print(response, end="", flush=True) - assistant_response += response - - print("", flush=True) - - if assistant_response: - self.messages.append(ChatMessage(role="assistant", content=assistant_response)) - logger.debug(f"Current messages: {self.messages}") - - def start(self): - - self.opening_instructions() - self.new_chat() - - while True: - try: - print("") - content = input("YOU: ") - self.check_exit(content) - if not self.check_new_chat(content): - print("") - print("MISTRAL:") - 
print("") - self.run_inference(content) - - except KeyboardInterrupt: - self.exit() - - def exit(self): - logger.debug("Exiting chatbot") - sys.exit(0) - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="A simple chatbot using the Mistral API") - parser.add_argument("--api-key", default=os.environ.get("MISTRAL_API_KEY"), - help="Mistral API key. Defaults to environment variable MISTRAL_API_KEY") - parser.add_argument("-m", "--model", choices=MODEL_LIST, - default=DEFAULT_MODEL, - help="Model for chat inference. Choices are %(choices)s. Defaults to %(default)s") - parser.add_argument("-s", "--system-message", - help="Optional system message to prepend.") - parser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging") - - args = parser.parse_args() - - if args.debug: - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.INFO) - - formatter = logging.Formatter(LOG_FORMAT) - - ch = logging.StreamHandler() - ch.setFormatter(formatter) - logger.addHandler(ch) - - logger.debug(f"Starting chatbot with model: {args.model}") - - bot = ChatBot(args.api_key, args.model, args.system_message) - bot.start() diff --git a/examples/embeddings.py b/examples/embeddings.py deleted file mode 100644 index 7d9dd2ec..00000000 --- a/examples/embeddings.py +++ /dev/null @@ -1,20 +0,0 @@ -import os - -from mistralai.client import MistralClient - - -def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = MistralClient(api_key=api_key) - - embeddings_response = client.embeddings( - model="mistral-embed", - input=["What is the best French cheese?"] * 10, - ) - - print(embeddings_response) - - -if __name__ == "__main__": - main() diff --git a/examples/fixtures/bcn_weather.mp3 b/examples/fixtures/bcn_weather.mp3 new file mode 100644 index 00000000..6a5152d1 Binary files /dev/null and b/examples/fixtures/bcn_weather.mp3 differ diff --git a/examples/fixtures/classifier_sentiments.jsonl 
b/examples/fixtures/classifier_sentiments.jsonl new file mode 100644 index 00000000..e5507198 --- /dev/null +++ b/examples/fixtures/classifier_sentiments.jsonl @@ -0,0 +1,33 @@ +{"text": "I love this product!", "labels": {"sentiment": "positive"}} +{"text": "The game was amazing.", "labels": {"sentiment": "positive"}} +{"text": "The new policy is controversial.", "labels": {"sentiment": "neutral"}} +{"text": "I don't like the new design.", "labels": {"sentiment": "negative"}} +{"text": "The team won the championship.", "labels": {"sentiment": "positive"}} +{"text": "The economy is in a bad shape.", "labels": {"sentiment": "negative"}} +{"text": "The weather is nice today.", "labels": {"sentiment": "positive"}} +{"text": "The match ended in a draw.", "labels": {"sentiment": "neutral"}} +{"text": "The new law will be implemented soon.", "labels": {"sentiment": "neutral"}} +{"text": "I had a great time at the concert.", "labels": {"sentiment": "positive"}} +{"text": "This movie was fantastic!", "labels": {"sentiment": "positive"}} +{"text": "The service was terrible.", "labels": {"sentiment": "negative"}} +{"text": "The food was delicious.", "labels": {"sentiment": "positive"}} +{"text": "I'm not sure about this decision.", "labels": {"sentiment": "neutral"}} +{"text": "The book was boring.", "labels": {"sentiment": "negative"}} +{"text": "The view from the top was breathtaking.", "labels": {"sentiment": "positive"}} +{"text": "The traffic was awful today.", "labels": {"sentiment": "negative"}} +{"text": "The event was well-organized.", "labels": {"sentiment": "positive"}} +{"text": "The meeting went on for too long.", "labels": {"sentiment": "negative"}} +{"text": "The presentation was informative.", "labels": {"sentiment": "positive"}} +{"text": "The new software update is buggy.", "labels": {"sentiment": "negative"}} +{"text": "The concert was sold out.", "labels": {"sentiment": "positive"}} +{"text": "The weather forecast is unreliable.", "labels": {"sentiment": 
"negative"}} +{"text": "The new phone is expensive.", "labels": {"sentiment": "neutral"}} +{"text": "The customer service was excellent.", "labels": {"sentiment": "positive"}} +{"text": "The new restaurant opened today.", "labels": {"sentiment": "neutral"}} +{"text": "The movie had a surprising ending.", "labels": {"sentiment": "positive"}} +{"text": "The project deadline is approaching.", "labels": {"sentiment": "neutral"}} +{"text": "The team is working hard.", "labels": {"sentiment": "positive"}} +{"text": "The new product launch was successful.", "labels": {"sentiment": "positive"}} +{"text": "The conference was insightful.", "labels": {"sentiment": "positive"}} +{"text": "The flight was delayed.", "labels": {"sentiment": "negative"}} +{"text": "The vacation was relaxing.", "labels": {"sentiment": "positive"}} \ No newline at end of file diff --git a/examples/fixtures/ft_training_file.jsonl b/examples/fixtures/ft_training_file.jsonl new file mode 100644 index 00000000..eecd016c --- /dev/null +++ b/examples/fixtures/ft_training_file.jsonl @@ -0,0 +1,21 @@ +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the largest ocean on Earth?"}, {"role": "assistant", "content": "The Pacific Ocean. You know, the one that covers more than 60 million square miles. No big deal."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How many continents are there?"}, {"role": "assistant", "content": "There are seven continents. 
I hope that wasn't too hard to count."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the powerhouse of the cell?"}, {"role": "assistant", "content": "The mitochondria. Remember that from high school biology?"}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the speed of light?"}, {"role": "assistant", "content": "Approximately 299,792 kilometers per second. You know, faster than your internet speed."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Can you solve a Rubik's Cube?"}, {"role": "assistant", "content": "I could if I had hands. What's your excuse?"}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the tallest mountain in the world?"}, {"role": "assistant", "content": "Mount Everest, standing at 29,029 feet. You know, just a little hill."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who painted the Mona Lisa?"}, {"role": "assistant", "content": "Leonardo da Vinci. Just another guy who liked to doodle."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the chemical symbol for gold?"}, {"role": "assistant", "content": "Au. Because 'G' was already taken, I guess."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How many bones are in the human body?"}, {"role": "assistant", "content": "206. 
Unless you break one, then the count might change."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the freezing point of water?"}, {"role": "assistant", "content": "0 degrees Celsius. Pretty basic stuff, really."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the currency of Japan?"}, {"role": "assistant", "content": "The Yen. Not Monopoly money, if you were wondering."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How long does it take to travel around the Earth?"}, {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. For you, depends on your mode of transportation."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the largest planet in our solar system?"}, {"role": "assistant", "content": "Jupiter. It's like the king of planets or something."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the main ingredient in guacamole?"}, {"role": "assistant", "content": "Avocado. Not exactly a secret ingredient."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who is known as the father of computers?"}, {"role": "assistant", "content": "Charles Babbage. The original tech geek."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How many hearts does an octopus have?"}, {"role": "assistant", "content": "Three. 
One for each of its charming personalities."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the smallest country in the world?"}, {"role": "assistant", "content": "Vatican City. Smaller than your average theme park."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What language has the most native speakers?"}, {"role": "assistant", "content": "Mandarin Chinese. Because it's not all about English, you know."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the hardest natural substance on Earth?"}, {"role": "assistant", "content": "Diamond. Not your average rock."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What element does 'O' represent on the periodic table?"}, {"role": "assistant", "content": "Oxygen. The stuff you breathe. Hopefully, this isn't news."}]} diff --git a/examples/fixtures/ft_validation_file.jsonl b/examples/fixtures/ft_validation_file.jsonl new file mode 100644 index 00000000..a1c814b0 --- /dev/null +++ b/examples/fixtures/ft_validation_file.jsonl @@ -0,0 +1,2 @@ +{"messages": [{"role": "user", "content": "How long does it take to travel around the Earth?"}, {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. For you, depends on your mode of transportation."}]} + diff --git a/examples/fixtures/lorem_ipsum.md b/examples/fixtures/lorem_ipsum.md new file mode 100644 index 00000000..1b36f18c --- /dev/null +++ b/examples/fixtures/lorem_ipsum.md @@ -0,0 +1,11 @@ +# Lorem ipsum + +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Fusce interdum mi velit, ac tincidunt erat ullamcorper in. Cras imperdiet orci sit amet tincidunt feugiat. 
Vivamus imperdiet purus a elit varius interdum ultrices ac dui. Curabitur et bibendum ex. Maecenas diam nulla, tempus ultrices tincidunt non, blandit a neque. Vivamus consequat metus aliquet finibus pretium. Phasellus arcu magna, lacinia eget hendrerit eget, hendrerit vitae ipsum. + +Ut sodales enim purus, quis laoreet libero fermentum et. Sed id massa id turpis porttitor tristique. Cras sed eleifend ante. In hac habitasse platea dictumst. Mauris rutrum sodales varius. Vestibulum imperdiet urna ac aliquet auctor. Nulla ultrices nulla mauris, sed sodales lacus vestibulum sit amet. Phasellus mollis erat quis neque hendrerit accumsan. Curabitur imperdiet sapien diam, sit amet finibus turpis placerat at. Proin in euismod lorem, eget bibendum quam. + +Aenean sit amet elit ornare, laoreet elit eget, aliquet nunc. Integer quis fermentum metus. Donec venenatis eget quam gravida mollis. Sed a suscipit libero, eget aliquet nibh. Maecenas tempus est nisl, vel viverra metus pharetra faucibus. Cras sodales dignissim mi quis pellentesque. In non hendrerit enim. Aliquam pretium turpis nec tortor imperdiet, id cursus enim auctor. Praesent aliquam mollis quam, nec luctus orci finibus quis. Vivamus in nibh faucibus, efficitur libero ut, feugiat elit. Quisque volutpat velit at porttitor feugiat. Vivamus porta sapien eros, non ultricies arcu suscipit ac. Etiam elit lorem, condimentum convallis semper at, congue eu quam. In ultrices sed nunc ac rhoncus. Aenean ultrices lobortis ex, at malesuada libero. + +Etiam accumsan quam sit amet interdum bibendum. Pellentesque rutrum tellus vel nibh dapibus mollis. Vestibulum a dolor semper, semper turpis varius, lacinia tellus. Pellentesque placerat neque vitae elit porttitor tincidunt. Donec id pulvinar felis, eu aliquam ipsum. Praesent a sapien et justo gravida condimentum. Vivamus vitae posuere nisi, quis faucibus justo. Pellentesque eget metus mauris. Quisque in lobortis nisi. 
Suspendisse nec risus sagittis, tristique nulla sed, faucibus massa. Cras ullamcorper turpis nec sagittis mollis. Pellentesque mattis consectetur enim, at molestie justo molestie sit amet. + +In hac habitasse platea dictumst. Aenean sed turpis tellus. Proin tristique dignissim neque, quis interdum mauris aliquam non. Aenean vitae enim eu nulla consectetur vestibulum. Aliquam lacus massa, venenatis a consectetur nec, vulputate vitae urna. In ultricies pulvinar leo, vel tempus ante scelerisque a. Vivamus dictum sodales orci. Aliquam quis neque leo. Donec eu mauris accumsan, pretium magna quis, feugiat purus. Phasellus malesuada purus nunc, condimentum mollis ipsum sollicitudin vitae. Nam tempus sapien non nulla varius viverra. Cras vel felis volutpat, bibendum neque a, porttitor mauris. Ut eu dapibus justo. \ No newline at end of file diff --git a/examples/gcp/.env.example b/examples/gcp/.env.example new file mode 100644 index 00000000..6721bd37 --- /dev/null +++ b/examples/gcp/.env.example @@ -0,0 +1,3 @@ +GCP_PROJECT_ID=your-gcp-project-id +GCP_REGION=us-central1 +GCP_MODEL=mistral-small-2503 diff --git a/examples/gcp/async_chat_no_streaming.py b/examples/gcp/async_chat_no_streaming.py new file mode 100755 index 00000000..61a2d076 --- /dev/null +++ b/examples/gcp/async_chat_no_streaming.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +""" +Example: Async chat completion with GCP Vertex AI. 
+ +The SDK automatically: +- Detects credentials via google.auth.default() +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from project_id and region + +Prerequisites: + gcloud auth application-default login + +Usage: + GCP_PROJECT_ID=your-project GCP_REGION=us-central1 GCP_MODEL=mistral-small-2503 python async_chat_no_streaming.py +""" + +import asyncio +import os + +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.models import UserMessage + +# Configuration from environment variables +GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") # Optional: auto-detected from credentials +GCP_REGION = os.environ.get("GCP_REGION", "us-central1") +GCP_MODEL = os.environ.get("GCP_MODEL", "mistral-small-2503") + + +async def main(): + # The SDK automatically handles: + # - Credential detection via google.auth.default() + # - Token refresh when expired + # - Vertex AI URL construction + client = MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) + + chat_response = await client.chat.complete_async( + model=GCP_MODEL, + messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/list_models.py b/examples/list_models.py deleted file mode 100644 index 9b5325ab..00000000 --- a/examples/list_models.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from mistralai.client import MistralClient - - -def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = MistralClient(api_key=api_key) - - list_models_response = client.list_models() - print(list_models_response) - - -if __name__ == "__main__": - main() diff --git a/examples/mistral/agents/async_agents_no_streaming.py b/examples/mistral/agents/async_agents_no_streaming.py new file mode 100755 index 00000000..6041cad3 --- /dev/null +++ b/examples/mistral/agents/async_agents_no_streaming.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +import 
asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + agent_id = os.environ["MISTRAL_AGENT_ID"] + + client = Mistral(api_key=api_key) + + chat_response = await client.agents.complete_async( + agent_id=agent_id, + messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_agent.py b/examples/mistral/agents/async_conversation_agent.py new file mode 100644 index 00000000..981f13c7 --- /dev/null +++ b/examples/mistral/agents/async_conversation_agent.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral + +MODEL = "mistral-medium-latest" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + agent = client.beta.agents.create( + model=MODEL, + name="WebSearch Agent", + instructions="Use your websearch abilities when answering requests you don't know.", + description="Agent able to fetch new information on the web.", + tools = [{"type": "web_search"}], + ) + + result = await client.beta.conversations.start_async( + agent_id=agent.id, + inputs="Who won the last Champions League?" + ) + + print("All result entries:") + for entry in result.outputs: + print(f"{entry}") + + result = await client.beta.conversations.append_async( + conversation_id=result.conversation_id, + inputs="And what about the previous year?" 
+ ) + + print("All result entries:") + for entry in result.outputs: + print(f"{entry}") + + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_run.py b/examples/mistral/agents/async_conversation_run.py new file mode 100644 index 00000000..bb96ed78 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext +from mistralai.client.types import BaseModel + +MODEL = "mistral-medium-2505" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class MathResult(BaseModel): + answer: int + + async with RunContext(model=MODEL, output_format=MathResult) as run_ctx: + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs=[{"role": "user", "content": "What is 2 + 2?"}], + ) + print(f"Result: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_run_code_interpreter.py b/examples/mistral/agents/async_conversation_run_code_interpreter.py new file mode 100644 index 00000000..10c81d77 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_code_interpreter.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext +from mistralai.client.types import BaseModel + +MODEL = "mistral-medium-2505" + + +def math_question_generator(question_num: int): + """Random generator of mathematical question + + Args: + question_num (int): the number of the question that will be returned, should be between 1-100 + """ + return ( + "solve the following differential equation: `y'' + 3y' + 2y = 0`" + if question_num % 2 == 0 + else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" + ) + 
+ +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: + # register a new function that can be executed on the client side + run_ctx.register_func(math_question_generator) + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + instructions="Use the code interpreter to help you when asked mathematical questions.", + inputs=[ + {"role": "user", "content": "hey"}, + {"role": "assistant", "content": "hello"}, + {"role": "user", "content": "Request a math question and answer it."}, + ], + tools=[{"type": "code_interpreter"}], + ) + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_run_mcp.py b/examples/mistral/agents/async_conversation_run_mcp.py new file mode 100644 index 00000000..52550004 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_mcp.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +import asyncio +import os +import random + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext +from mcp import StdioServerParameters +from mistralai.extra.mcp.stdio import ( + MCPClientSTDIO, +) +from pathlib import Path + +from mistralai.client.types import BaseModel + +cwd = Path(__file__).parent +MODEL = "mistral-medium-latest" + + +async def main() -> None: + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Create a mcp server has a tool to return the weather based on the location + server_params = StdioServerParameters( + command="python", + args=[str((cwd / 
"mcp_servers/stdio_server.py").resolve())], + env=None, + ) + + weather_agent = client.beta.agents.create( + model=MODEL, + name="weather teller", + instructions="You are able to tell the weather.", + description="", + ) + + class WeatherResult(BaseModel): + user: str + location: str + temperature: float + + async with RunContext( + agent_id=weather_agent.id, + output_format=WeatherResult, + continue_on_fn_error=True, + ) as run_ctx: + # Add location function to the run context + @run_ctx.register_func + def get_location(name: str) -> str: + """function to get location of a user. + + Args: + name: name of the user. + """ + return random.choice(["New York", "London", "Paris", "Tokyo", "Sydney"]) + + # Add mcp client to the run context + mcp_client = MCPClientSTDIO(stdio_params=server_params) + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Tell me the weather in John's location currently.", + ) + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print() + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_run_mcp_remote.py b/examples/mistral/agents/async_conversation_run_mcp_remote.py new file mode 100644 index 00000000..d6fac492 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_mcp_remote.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext + +from mistralai.extra.mcp.sse import ( + MCPClientSSE, + SSEServerParams, +) +from pathlib import Path + +cwd = Path(__file__).parent +MODEL = "mistral-medium-latest" + +# Use an official remote mcp server +# you can find some at: +# - https://mcpservers.org/remote-mcp-servers +# this one does not require auth: 
https://remote.mcpservers.org/edgeone-pages/mcp + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + server_url = "https://mcp.semgrep.ai/sse" + mcp_client = MCPClientSSE(sse_params=SSEServerParams(url=server_url, timeout=100)) + + async with RunContext( + model=MODEL, + ) as run_ctx: + # Add mcp client to the run context + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Can you write a hello_world.py and check for security vulnerabilities", + ) + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print() + print(f"Final Response: {run_result.output_as_text}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py new file mode 100644 index 00000000..c255895e --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +import asyncio +from http.server import BaseHTTPRequestHandler, HTTPServer +import os +import threading +import webbrowser + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext + +from mistralai.extra.mcp.sse import ( + MCPClientSSE, + SSEServerParams, +) +from mistralai.extra.mcp.auth import build_oauth_params + +MODEL = "mistral-medium-latest" + +CALLBACK_PORT = 16010 + + +# Use an official remote mcp server +# you can find some at: +# - https://mcpservers.org/remote-mcp-servers +# - https://support.anthropic.com/en/articles/11176164-pre-built-integrations-using-remote-mcp +# this one has auth: 
https://mcp.linear.app/sse + + +def run_callback_server(callback_func): + auth_response: dict = {"url": ""} + + class OAuthCallbackHandler(BaseHTTPRequestHandler): + server_version = "HTTP" + code = None + + def do_GET(self): + if "/callback" in self.path: + try: + auth_response["url"] = self.path + self.send_response(200) + self.send_header("Content-type", "text/html") + self.end_headers() + callback_func() + response_html = "

You may now close this window.

" + self.wfile.write(response_html.encode()) + threading.Thread(target=httpd.shutdown).start() + except Exception: + self.send_response(500) + self.end_headers() + + server_address = ("localhost", CALLBACK_PORT) + httpd = HTTPServer(server_address, OAuthCallbackHandler) + threading.Thread(target=httpd.serve_forever).start() + redirect_url = f"http://localhost:{CALLBACK_PORT}/oauth/callback" + return httpd, redirect_url, auth_response + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + server_url = "https://mcp.linear.app/sse" + + # set-up the client + mcp_client = MCPClientSSE( + sse_params=SSEServerParams( + url=server_url, + ) + ) + + callback_event = asyncio.Event() + event_loop = asyncio.get_event_loop() + + # check if auth is required + if await mcp_client.requires_auth(): + # let's login + httpd, redirect_url, auth_response = run_callback_server( + callback_func=lambda: event_loop.call_soon_threadsafe(callback_event.set) + ) + try: + # First create the required oauth config, this means fetching the server metadata and registering a client + oauth_params = await build_oauth_params( + mcp_client.base_url, redirect_url=redirect_url + ) + mcp_client.set_oauth_params(oauth_params=oauth_params) + login_url, state = await mcp_client.get_auth_url_and_state(redirect_url) + + # The oauth params like client_id, client_secret would generally be saved in some persistent storage. + # The oauth state and token would be saved in a user session. 
+ + # wait for the user to complete the authentication process + print("Please go to this URL and authorize the application:", login_url) + webbrowser.open(login_url, new=2) + await callback_event.wait() + + # in a real app this would be your oauth2 callback route you would get the code from the query params, + # verify the state, and then get the token + # Here we recreate a new client with the saved params which and exchange the code for a token + mcp_client = MCPClientSSE( + sse_params=SSEServerParams( + url=server_url, + ), + oauth_params=oauth_params, + ) + + token = await mcp_client.get_token_from_auth_response( + auth_response["url"], redirect_url=redirect_url, state=state + ) + mcp_client.set_auth_token(token) + + except Exception as e: + print(f"Error during authentication: {e}") + finally: + httpd.shutdown() + httpd.server_close() + + # Now it's possible to make a query to the mcp server as we would do without authentication + async with RunContext( + model=MODEL, + ) as run_ctx: + # Add mcp client to the run context + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Tell me which projects do I have in my workspace?", + ) + + print(f"Final Response: {run_result.output_as_text}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_run_stream.py b/examples/mistral/agents/async_conversation_run_stream.py new file mode 100644 index 00000000..431b9cc9 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_stream.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +import asyncio +import os +import random + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext +from mcp import StdioServerParameters +from mistralai.extra.mcp.stdio import MCPClientSTDIO +from pathlib import Path + +from mistralai.extra.run.result import RunResult +from mistralai.client.types import 
BaseModel + +cwd = Path(__file__).parent +MODEL = "mistral-medium-latest" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Create a mcp server has a tool to return the weather based on the location + server_params = StdioServerParameters( + command="python", + args=[str((cwd / "mcp_servers/stdio_server.py").resolve())], + env=None, + ) + + weather_agent = client.beta.agents.create( + model=MODEL, + name="weather teller", + instructions="You are able to tell the weather.", + description="", + ) + + class WeatherResult(BaseModel): + user: str + location: str + temperature: float + + async with RunContext( + agent_id=weather_agent.id, + output_format=WeatherResult, + ) as run_ctx: + # Add location function to the run context + @run_ctx.register_func + def get_location(name: str) -> str: + """function to get location of a user. + + Args: + name: name of the user. + """ + return random.choice(["New York", "London", "Paris", "Tokyo", "Sydney"]) + + # Add mcp client to the run context + mcp_client = MCPClientSTDIO(stdio_params=server_params) + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + events = await client.beta.conversations.run_stream_async( + run_ctx=run_ctx, + inputs="Tell me the weather in John's location currently.", + ) + + run_result = None + async for event in events: + if isinstance(event, RunResult): + run_result = event + else: + print(event) + + if not run_result: + raise RuntimeError("not run result found") + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_multi_turn_conversation.py b/examples/mistral/agents/async_multi_turn_conversation.py new file mode 100644 index 00000000..26c2378f --- /dev/null +++ b/examples/mistral/agents/async_multi_turn_conversation.py @@ -0,0 +1,69 @@ +import os +from 
mistralai.client import Mistral + +from mistralai.extra.run.context import RunContext +import logging +import time +import asyncio + + +MODEL = "mistral-medium-latest" + +USER_MESSAGE = """ +Please make the Secret Santa for me +To properly do it you need to: +- Get the friend you were assigned to (using the get_secret_santa_assignment function) +- Read into his gift wishlist what they would like to receive (using the get_gift_wishlist function) +- Buy the gift (using the buy_gift function) +- Find the best website to buy the gift using a web search +- Send it to them (using the send_gift function) +""" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + mistral_agent_id = os.environ["MISTRAL_AGENT_ID"] + client = Mistral( + api_key=api_key, debug_logger=logging.getLogger("mistralai") + ) + + async with RunContext( + agent_id=mistral_agent_id + ) as run_context: + run_context.register_func(get_secret_santa_assignment) + run_context.register_func(get_gift_wishlist) + run_context.register_func(buy_gift) + run_context.register_func(send_gift) + + await client.beta.conversations.run_async( + run_ctx=run_context, + inputs=USER_MESSAGE, + ) + + +def get_secret_santa_assignment(): + """Get the friend you were assigned to""" + time.sleep(2) + return "John Doe" + + +def get_gift_wishlist(friend_name: str): + """Get the gift wishlist of the friend you were assigned to""" + time.sleep(1.5) + return ["Book", "Chocolate", "T-Shirt"] + + +def buy_gift(gift_name: str): + """Buy the gift you want to send to your friend""" + time.sleep(1.1) + return f"Bought {gift_name}" + + +def send_gift(friend_name: str, gift_name: str, website: str): + """Send the gift to your friend""" + time.sleep(2.2) + return f"Sent {gift_name} to {friend_name} bought on {website}" + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py 
b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py new file mode 100644 index 00000000..7653b0ed --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py @@ -0,0 +1,473 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import difflib +import os +import sys +from dataclasses import dataclass +from typing import AsyncIterator, Sequence + +from rich.align import Align +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel +from rich.text import Text + +from mistralai.client import Mistral +from mistralai.extra.realtime import UnknownRealtimeEvent +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSessionCreated, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + +from pyaudio_utils import load_pyaudio + +console = Console() + + +@dataclass +class DualTranscriptState: + """Tracks transcript state for dual-delay transcription.""" + + fast_full_text: str = "" + slow_full_text: str = "" + fast_status: str = "🔌 Connecting..." + slow_status: str = "🔌 Connecting..." 
+ error: str | None = None + fast_done: bool = False + slow_done: bool = False + + def set_error(self, message: str) -> None: + self.error = message + self.fast_status = "❌ Error" + self.slow_status = "❌ Error" + + +class DualTranscriptDisplay: + """Renders a live dual-delay transcription UI.""" + + def __init__( + self, + *, + model: str, + fast_delay_ms: int, + slow_delay_ms: int, + state: DualTranscriptState, + ) -> None: + self.model = model + self.fast_delay_ms = fast_delay_ms + self.slow_delay_ms = slow_delay_ms + self.state = state + + @staticmethod + def _normalize_word(word: str) -> str: + return word.strip(".,!?;:\"'()[]{}").lower() + + def _compute_display_texts(self) -> tuple[str, str]: + slow_words = self.state.slow_full_text.split() + fast_words = self.state.fast_full_text.split() + + if not slow_words: + partial_text = f" {self.state.fast_full_text}".rstrip() + return "", partial_text + + slow_norm = [self._normalize_word(word) for word in slow_words] + fast_norm = [self._normalize_word(word) for word in fast_words] + + matcher = difflib.SequenceMatcher(None, slow_norm, fast_norm) + last_fast_index = 0 + slow_progress = 0 + for block in matcher.get_matching_blocks(): + if block.size == 0: + continue + slow_end = block.a + block.size + if slow_end > slow_progress: + slow_progress = slow_end + last_fast_index = block.b + block.size + + if last_fast_index < len(fast_words): + ahead_words = fast_words[last_fast_index:] + partial_text = " " + " ".join(ahead_words) if ahead_words else "" + else: + partial_text = "" + + return self.state.slow_full_text, partial_text + + @staticmethod + def _status_style(status: str) -> str: + if "Listening" in status: + return "green" + if "Connecting" in status: + return "yellow dim" + if "Done" in status or "Stopped" in status: + return "dim" + return "red" + + def render(self) -> Layout: + layout = Layout() + + header_text = Text() + header_text.append("│ ", style="dim") + header_text.append(self.model, style="dim") + 
header_text.append(" │ ", style="dim") + header_text.append( + f"fast {self.fast_delay_ms}ms", style="bright_yellow" + ) + header_text.append( + f" {self.state.fast_status}", + style=self._status_style(self.state.fast_status), + ) + header_text.append(" │ ", style="dim") + header_text.append(f"slow {self.slow_delay_ms}ms", style="white") + header_text.append( + f" {self.state.slow_status}", + style=self._status_style(self.state.slow_status), + ) + + header = Align.left(header_text, vertical="middle", pad=False) + + final_text, partial_text = self._compute_display_texts() + transcript_text = Text() + if final_text or partial_text: + transcript_text.append(final_text, style="white") + transcript_text.append(partial_text, style="bright_yellow") + else: + transcript_text.append("...", style="dim") + + transcript = Panel( + Align.left(transcript_text, vertical="top"), + border_style="dim", + padding=(1, 2), + ) + + footer_text = Text() + footer_text.append("ctrl+c", style="dim") + footer_text.append(" quit", style="dim italic") + footer = Align.left(footer_text, vertical="middle", pad=False) + + if self.state.error: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout( + Panel(Text(self.state.error, style="red"), border_style="red"), + name="error", + size=4, + ), + Layout(footer, name="footer", size=1), + ) + else: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout(footer, name="footer", size=1), + ) + + return layout + + +async def iter_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, +) -> AsyncIterator[bytes]: + """ + Yield microphone PCM chunks using PyAudio (16-bit mono). + Encoding is always pcm_s16le. 
+ """ + pyaudio = load_pyaudio() + + p = pyaudio.PyAudio() + chunk_samples = int(sample_rate * chunk_duration_ms / 1000) + + stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_samples, + ) + + loop = asyncio.get_running_loop() + try: + while True: + data = await loop.run_in_executor(None, stream.read, chunk_samples, False) + yield data + finally: + stream.stop_stream() + stream.close() + p.terminate() + + +async def queue_audio_iter( + queue: asyncio.Queue[bytes | None], +) -> AsyncIterator[bytes]: + """Yield audio chunks from a queue until a None sentinel is received.""" + while True: + chunk = await queue.get() + if chunk is None: + break + yield chunk + + +async def broadcast_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, + queues: Sequence[asyncio.Queue[bytes | None]], +) -> None: + """Read from the microphone once and broadcast to multiple queues.""" + try: + async for chunk in iter_microphone( + sample_rate=sample_rate, chunk_duration_ms=chunk_duration_ms + ): + for queue in queues: + await queue.put(chunk) + finally: + for queue in queues: + while True: + try: + queue.put_nowait(None) + break + except asyncio.QueueFull: + try: + queue.get_nowait() + except asyncio.QueueEmpty: + break + + +def _status_for_event(event: object) -> str: + if isinstance(event, RealtimeTranscriptionSessionCreated): + return "🎤 Listening..." + return "✅ Done" + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Dual-delay real-time microphone transcription." 
+ ) + parser.add_argument( + "--model", + default="voxtral-mini-transcribe-realtime-2602", + help="Model ID", + ) + parser.add_argument( + "--fast-delay-ms", + type=int, + default=240, + help="Fast target streaming delay in ms", + ) + parser.add_argument( + "--slow-delay-ms", + type=int, + default=2400, + help="Slow target streaming delay in ms", + ) + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + choices=[8000, 16000, 22050, 44100, 48000], + help="Sample rate in Hz", + ) + parser.add_argument( + "--chunk-duration", + type=int, + default=10, + help="Chunk duration in ms", + ) + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key", + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"), + ) + return parser.parse_args() + + +async def run_stream( + *, + client: Mistral, + model: str, + delay_ms: int, + audio_stream: AsyncIterator[bytes], + audio_format: AudioFormat, + state: DualTranscriptState, + update_queue: asyncio.Queue[None], + is_fast: bool, +) -> None: + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=audio_stream, + model=model, + audio_format=audio_format, + target_streaming_delay_ms=delay_ms, + ): + if isinstance(event, RealtimeTranscriptionSessionCreated): + if is_fast: + state.fast_status = _status_for_event(event) + else: + state.slow_status = _status_for_event(event) + elif isinstance(event, TranscriptionStreamTextDelta): + if is_fast: + state.fast_full_text += event.text + else: + state.slow_full_text += event.text + elif isinstance(event, TranscriptionStreamDone): + if is_fast: + state.fast_status = _status_for_event(event) + state.fast_done = True + else: + state.slow_status = _status_for_event(event) + state.slow_done = True + break + elif isinstance(event, RealtimeTranscriptionError): + state.set_error(str(event.error)) + break + elif isinstance(event, UnknownRealtimeEvent): + 
continue + + if update_queue.empty(): + update_queue.put_nowait(None) + except Exception as exc: # pragma: no cover - safety net for UI demo + state.set_error(str(exc)) + if update_queue.empty(): + update_queue.put_nowait(None) + + +async def ui_loop( + display: DualTranscriptDisplay, + update_queue: asyncio.Queue[None], + stop_event: asyncio.Event, + *, + refresh_hz: float = 12.0, +) -> None: + with Live( + display.render(), console=console, refresh_per_second=refresh_hz, screen=True + ) as live: + while not stop_event.is_set(): + try: + await asyncio.wait_for(update_queue.get(), timeout=0.25) + except asyncio.TimeoutError: + pass + live.update(display.render()) + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + + state = DualTranscriptState() + display = DualTranscriptDisplay( + model=args.model, + fast_delay_ms=args.fast_delay_ms, + slow_delay_ms=args.slow_delay_ms, + state=state, + ) + + client = Mistral(api_key=api_key, server_url=args.base_url) + audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate) + + fast_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + slow_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + + stop_event = asyncio.Event() + update_queue: asyncio.Queue[None] = asyncio.Queue(maxsize=1) + + broadcaster = asyncio.create_task( + broadcast_microphone( + sample_rate=args.sample_rate, + chunk_duration_ms=args.chunk_duration, + queues=(fast_queue, slow_queue), + ) + ) + + fast_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + delay_ms=args.fast_delay_ms, + audio_stream=queue_audio_iter(fast_queue), + audio_format=audio_format, + state=state, + update_queue=update_queue, + is_fast=True, + ) + ) + + slow_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + 
delay_ms=args.slow_delay_ms, + audio_stream=queue_audio_iter(slow_queue), + audio_format=audio_format, + state=state, + update_queue=update_queue, + is_fast=False, + ) + ) + + ui_task = asyncio.create_task( + ui_loop(display, update_queue, stop_event, refresh_hz=12.0) + ) + + try: + while True: + await asyncio.sleep(0.1) + for task in (broadcaster, fast_task, slow_task): + if not task.done(): + continue + exc = task.exception() + if exc: + state.set_error(str(exc)) + if update_queue.empty(): + update_queue.put_nowait(None) + stop_event.set() + break + if state.error: + stop_event.set() + break + if state.fast_done and state.slow_done: + stop_event.set() + break + except KeyboardInterrupt: + state.fast_status = "⏹️ Stopped" + state.slow_status = "⏹️ Stopped" + stop_event.set() + finally: + broadcaster.cancel() + fast_task.cancel() + slow_task.cancel() + await asyncio.gather(broadcaster, fast_task, slow_task, return_exceptions=True) + await ui_task + + return 0 if not state.error else 1 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py b/examples/mistral/audio/async_realtime_transcription_microphone.py new file mode 100644 index 00000000..49568aea --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import os +import sys +from typing import AsyncIterator + +from rich.align import Align +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel +from rich.text import Text + +from mistralai.client import Mistral +from mistralai.extra.realtime import UnknownRealtimeEvent +from mistralai.client.models import ( 
+ AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSessionCreated, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + +from pyaudio_utils import load_pyaudio + +console = Console() + + +class TranscriptDisplay: + """Manages the live transcript display.""" + + def __init__(self, model: str, target_streaming_delay_ms: int | None) -> None: + self.model = model + self.target_streaming_delay_ms = target_streaming_delay_ms + self.transcript = "" + self.status = "🔌 Connecting..." + self.error: str | None = None + + def set_listening(self) -> None: + self.status = "🎤 Listening..." + + def add_text(self, text: str) -> None: + self.transcript += text + + def set_done(self) -> None: + self.status = "✅ Done" + + def set_error(self, error: str) -> None: + self.status = "❌ Error" + self.error = error + + def render(self) -> Layout: + layout = Layout() + + # Create minimal header + header_text = Text() + header_text.append("│ ", style="dim") + header_text.append(self.model, style="dim") + if self.target_streaming_delay_ms is not None: + header_text.append( + f" · delay {self.target_streaming_delay_ms}ms", style="dim" + ) + header_text.append(" │ ", style="dim") + + if "Listening" in self.status: + status_style = "green" + elif "Connecting" in self.status: + status_style = "yellow dim" + elif "Done" in self.status or "Stopped" in self.status: + status_style = "dim" + else: + status_style = "red" + header_text.append(self.status, style=status_style) + + header = Align.left(header_text, vertical="middle", pad=False) + + # Create main transcript area - no title, minimal border + transcript_text = Text( + self.transcript or "...", style="white" if self.transcript else "dim" + ) + transcript = Panel( + Align.left(transcript_text, vertical="top"), + border_style="dim", + padding=(1, 2), + ) + + # Minimal footer + footer_text = Text() + footer_text.append("ctrl+c", style="dim") + footer_text.append(" quit", style="dim italic") + footer = 
Align.left(footer_text, vertical="middle", pad=False) + + # Handle error display + if self.error: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout( + Panel(Text(self.error, style="red"), border_style="red"), + name="error", + size=4, + ), + Layout(footer, name="footer", size=1), + ) + else: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout(footer, name="footer", size=1), + ) + + return layout + + +async def iter_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, +) -> AsyncIterator[bytes]: + """ + Yield microphone PCM chunks using PyAudio (16-bit mono). + Encoding is always pcm_s16le. + """ + pyaudio = load_pyaudio() + + p = pyaudio.PyAudio() + chunk_samples = int(sample_rate * chunk_duration_ms / 1000) + + stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_samples, + ) + + loop = asyncio.get_running_loop() + try: + while True: + # stream.read is blocking; run it off-thread + data = await loop.run_in_executor(None, stream.read, chunk_samples, False) + yield data + finally: + stream.stop_stream() + stream.close() + p.terminate() + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Real-time microphone transcription.") + parser.add_argument("--model", default="voxtral-mini-transcribe-realtime-2602", help="Model ID") + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + choices=[8000, 16000, 22050, 44100, 48000], + help="Sample rate in Hz", + ) + parser.add_argument( + "--chunk-duration", type=int, default=10, help="Chunk duration in ms" + ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) + parser.add_argument( + "--api-key", default=os.environ.get("MISTRAL_API_KEY"), help="Mistral API key" + ) + parser.add_argument( + 
"--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"), + ) + return parser.parse_args() + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + + client = Mistral(api_key=api_key, server_url=args.base_url) + + # microphone is always pcm_s16le here + audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate) + + mic_stream = iter_microphone( + sample_rate=args.sample_rate, chunk_duration_ms=args.chunk_duration + ) + + display = TranscriptDisplay( + model=args.model, target_streaming_delay_ms=args.target_streaming_delay_ms + ) + + with Live( + display.render(), console=console, refresh_per_second=10, screen=True + ) as live: + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=mic_stream, + model=args.model, + audio_format=audio_format, + target_streaming_delay_ms=args.target_streaming_delay_ms, + ): + if isinstance(event, RealtimeTranscriptionSessionCreated): + display.set_listening() + live.update(display.render()) + elif isinstance(event, TranscriptionStreamTextDelta): + display.add_text(event.text) + live.update(display.render()) + elif isinstance(event, TranscriptionStreamDone): + display.set_done() + live.update(display.render()) + break + elif isinstance(event, RealtimeTranscriptionError): + display.set_error(str(event.error)) + live.update(display.render()) + return 1 + elif isinstance(event, UnknownRealtimeEvent): + continue + except KeyboardInterrupt: + display.status = "⏹️ Stopped" + live.update(display.render()) + except Exception as exc: + display.set_error(str(exc)) + live.update(display.render()) + return 1 + + return 0 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/async_realtime_transcription_stream.py 
b/examples/mistral/audio/async_realtime_transcription_stream.py new file mode 100644 index 00000000..c005cf3f --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_stream.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python + +import argparse +import asyncio +import os +import subprocess +import sys +import tempfile +from pathlib import Path +from typing import AsyncIterator + +from mistralai.client import Mistral +from mistralai.extra.realtime.connection import UnknownRealtimeEvent +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionError, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + + +def convert_audio_to_pcm( + input_path: Path, +) -> Path: + temp_file = tempfile.NamedTemporaryFile(suffix=".pcm", delete=False) + temp_path = Path(temp_file.name) + temp_file.close() + + cmd = [ + "ffmpeg", + "-y", + "-i", + str(input_path), + "-f", + "s16le", + "-ar", + str(16000), + "-ac", + "1", + str(temp_path), + ] + + try: + subprocess.run(cmd, check=True, capture_output=True, text=True) + except subprocess.CalledProcessError as exc: + temp_path.unlink(missing_ok=True) + raise RuntimeError(f"ffmpeg conversion failed: {exc.stderr}") from exc + + return temp_path + + +async def aiter_audio_file( + path: Path, + *, + chunk_size: int = 4096, + chunk_delay: float = 0.0, +) -> AsyncIterator[bytes]: + with open(path, "rb") as f: + while True: + chunk = f.read(chunk_size) + if not chunk: + break + yield chunk + if chunk_delay > 0: + await asyncio.sleep(chunk_delay) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Real-time audio transcription via WebSocket (iterator-based)." 
+ ) + parser.add_argument("file", type=Path, help="Path to the audio file") + parser.add_argument("--model", default="voxtral-mini-2601", help="Model ID") + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key", + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "https://api.mistral.ai"), + help="API base URL (http/https/ws/wss)", + ) + parser.add_argument( + "--chunk-size", type=int, default=4096, help="Audio chunk size in bytes" + ) + parser.add_argument( + "--chunk-delay", + type=float, + default=0.01, + help="Delay between chunks in seconds", + ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) + parser.add_argument( + "--no-convert", + action="store_true", + help="Skip ffmpeg conversion (input must be raw PCM)", + ) + return parser.parse_args() + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + pcm_path = args.file + temp_path = None + + if not args.no_convert and args.file.suffix.lower() not in (".pcm", ".raw"): + pcm_path = convert_audio_to_pcm(args.file) + temp_path = pcm_path + + client = Mistral(api_key=api_key, server_url=args.base_url) + + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=aiter_audio_file( + pcm_path, + chunk_size=args.chunk_size, + chunk_delay=args.chunk_delay, + ), + model=args.model, + audio_format=AudioFormat(encoding="pcm_s16le", sample_rate=16000), + target_streaming_delay_ms=args.target_streaming_delay_ms, + ): + if isinstance(event, TranscriptionStreamTextDelta): + print(event.text, end="", flush=True) + elif isinstance(event, TranscriptionStreamDone): + print() + break + elif isinstance(event, RealtimeTranscriptionError): + print(f"\nError: {event.error}", file=sys.stderr) + break + elif isinstance(event, 
UnknownRealtimeEvent): + # ignore future / unknown events; keep going + continue + + finally: + if temp_path is not None: + temp_path.unlink(missing_ok=True) + + return 0 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/chat_base64.py b/examples/mistral/audio/chat_base64.py new file mode 100755 index 00000000..d6afb2ab --- /dev/null +++ b/examples/mistral/audio/chat_base64.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +import base64 +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-small-latest" + + client = Mistral(api_key=api_key) + with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + content = f.read() + chat_response = client.chat.complete( + model=model, + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What's in this audio file?"}, + { + "type": "input_audio", + "input_audio": base64.b64encode(content).decode("utf-8"), + }, + ] + ) + ], + ) + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/audio/chat_no_streaming.py b/examples/mistral/audio/chat_no_streaming.py new file mode 100755 index 00000000..87237ec0 --- /dev/null +++ b/examples/mistral/audio/chat_no_streaming.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-small-latest" + + client = Mistral(api_key=api_key) + + chat_response = client.chat.complete( + model=model, + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": "https://docs.mistral.ai/audio/bcn_weather.mp3", + }, + ] + ) + ], + ) + 
print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/audio/chat_streaming.py b/examples/mistral/audio/chat_streaming.py new file mode 100755 index 00000000..b418ef57 --- /dev/null +++ b/examples/mistral/audio/chat_streaming.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral +from mistralai.client.models import File +from mistralai.client.models import UserMessage + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-small-latest" + + client = Mistral(api_key=api_key) + with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + file = client.files.upload( + file=File(content=f, file_name=f.name), purpose="audio" + ) + print(f"Uploaded audio file, id={file.id}") + signed_url = client.files.get_signed_url(file_id=file.id) + try: + chat_response = client.chat.stream( + model=model, + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": signed_url.url, + }, + ] + ) + ], + ) + for chunk in chat_response: + print(chunk.data.choices[0].delta.content) + finally: + client.files.delete(file_id=file.id) + print(f"Deleted audio file, id={file.id}") + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/audio/pyaudio_utils.py b/examples/mistral/audio/pyaudio_utils.py new file mode 100644 index 00000000..af72a885 --- /dev/null +++ b/examples/mistral/audio/pyaudio_utils.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from types import ModuleType + + +def load_pyaudio() -> ModuleType: + """ + Import PyAudio with a friendly error when PortAudio is missing. + + Raises: + RuntimeError: If PyAudio/PortAudio cannot be imported. 
+ """ + try: + import pyaudio + except Exception as exc: + details = str(exc).lower() + if isinstance(exc, ModuleNotFoundError) and exc.name == "pyaudio": + message = ( + "PyAudio is required to use the microphone.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + elif "pyaudio._portaudio" in details or "portaudio" in details: + message = ( + "PyAudio is installed, but the PortAudio native library is missing or " + "failed to load.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + else: + message = ( + "PyAudio is required to use the microphone, but it could not be " + "imported.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + raise RuntimeError(message) from exc + return pyaudio diff --git a/examples/mistral/audio/transcription_async.py b/examples/mistral/audio/transcription_async.py new file mode 100644 index 00000000..f04f397e --- /dev/null +++ b/examples/mistral/audio/transcription_async.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +import os +import asyncio +from mistralai.client import Mistral +from mistralai.client.models import File + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-latest" + + client = Mistral(api_key=api_key) + with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + response = await client.audio.transcriptions.complete_async( + model=model, + file=File(content=f, file_name=f.name), + ) + print(response.text) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py new file mode 100644 index 00000000..4b511c87 --- /dev/null +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +import os +import asyncio +import pathlib +from mistralai.client import Mistral +from 
mistralai.client.models import File + +fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-2602" + + client = Mistral(api_key=api_key) + with open(fixture_dir / "bcn_weather.mp3", "rb") as f: + response = await client.audio.transcriptions.complete_async( + model=model, + file=File(content=f, file_name=f.name), + diarize=True, + timestamp_granularities=["segment"], + ) + for segment in response.segments: + speaker = segment.speaker_id or "unknown" + print( + f"[{segment.start:.1f}s → {segment.end:.1f}s] {speaker}: {segment.text.strip()}" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/audio/transcription_segments.py b/examples/mistral/audio/transcription_segments.py new file mode 100644 index 00000000..3d691711 --- /dev/null +++ b/examples/mistral/audio/transcription_segments.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-latest" + + client = Mistral(api_key=api_key) + response = client.audio.transcriptions.complete( + model=model, + file_url="https://docs.mistral.ai/audio/bcn_weather.mp3", + timestamp_granularities=["segment"], + ) + print(response.text) + print(response.segments) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/audio/transcription_segments_stream.py b/examples/mistral/audio/transcription_segments_stream.py new file mode 100644 index 00000000..32edf951 --- /dev/null +++ b/examples/mistral/audio/transcription_segments_stream.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-latest" + + client = Mistral(api_key=api_key) + response = client.audio.transcriptions.stream( + model=model, + 
file_url="https://docs.mistral.ai/audio/bcn_weather.mp3", + timestamp_granularities=["segment"], + ) + for chunk in response: + print(chunk) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/audio/transcription_stream_async.py b/examples/mistral/audio/transcription_stream_async.py new file mode 100644 index 00000000..3055f3de --- /dev/null +++ b/examples/mistral/audio/transcription_stream_async.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import File + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-2507" + + client = Mistral(api_key=api_key) + with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + response = await client.audio.transcriptions.stream_async( + model=model, + file=File(content=f, file_name=f.name), + ) + async for chunk in response: + print(chunk.event, chunk.data) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/audio/transcription_url.py b/examples/mistral/audio/transcription_url.py new file mode 100644 index 00000000..907f830d --- /dev/null +++ b/examples/mistral/audio/transcription_url.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-latest" + + client = Mistral(api_key=api_key) + response = client.audio.transcriptions.complete( + model=model, + file_url="https://docs.mistral.ai/audio/bcn_weather.mp3", + ) + print(response.text) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/chat/async_chat_no_streaming.py b/examples/mistral/chat/async_chat_no_streaming.py new file mode 100755 index 00000000..ad45d0fd --- /dev/null +++ b/examples/mistral/chat/async_chat_no_streaming.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +import 
asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "mistral-tiny" + + client = Mistral(api_key=api_key) + + chat_response = await client.chat.complete_async( + model=model, + messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/chat/async_chat_with_image_no_streaming.py b/examples/mistral/chat/async_chat_with_image_no_streaming.py new file mode 100755 index 00000000..5d2cbdaa --- /dev/null +++ b/examples/mistral/chat/async_chat_with_image_no_streaming.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python + +import asyncio +import os + + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "pixtral-12b-2409" + client = Mistral(api_key=api_key) + + chat_response = await client.chat.complete_async( + model=model, + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": "https://cms.mistral.ai/assets/a64b3821-3a4c-4d4d-b718-d653f3eb7a5e.png?", + }, + ] + ) + ], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/chat/async_chat_with_streaming.py b/examples/mistral/chat/async_chat_with_streaming.py new file mode 100755 index 00000000..1642ea41 --- /dev/null +++ b/examples/mistral/chat/async_chat_with_streaming.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "mistral-tiny" + + client = 
Mistral(api_key=api_key)
+
+    print("Chat response:")
+    response = await client.chat.stream_async(
+        model=model,
+        messages=[
+            UserMessage(content="What is the best French cheese? Give the best 50")
+        ],
+    )
+    assert response
+    async for chunk in response:
+        if chunk.data.choices[0].delta.content is not None:
+            print(chunk.data.choices[0].delta.content, end="")
+
+    print("\n")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/examples/mistral/chat/async_structured_outputs.py b/examples/mistral/chat/async_structured_outputs.py
new file mode 100644
index 00000000..09ed5737
--- /dev/null
+++ b/examples/mistral/chat/async_structured_outputs.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+import asyncio
+import os
+from pydantic import BaseModel
+
+from mistralai.client import Mistral
+
+
+async def main():
+    api_key = os.environ["MISTRAL_API_KEY"]
+    client = Mistral(api_key=api_key)
+
+    class Explanation(BaseModel):
+        explanation: str
+        output: str
+
+    class MathDemonstration(BaseModel):
+        steps: list[Explanation]
+        final_answer: str
+
+    chat_response = await client.chat.parse_async(
+        model="mistral-large-2411",
+        messages=[
+            {
+                "role": "system",
+                "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration, + ) + print(chat_response.choices[0].message.parsed) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/chat/chat_no_streaming.py b/examples/mistral/chat/chat_no_streaming.py new file mode 100755 index 00000000..5f6968ca --- /dev/null +++ b/examples/mistral/chat/chat_no_streaming.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "mistral-tiny" + + client = Mistral(api_key=api_key) + + chat_response = client.chat.complete( + model=model, + messages=[UserMessage(content="What is the best French cheese?")], + ) + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/chat/chat_prediction.py b/examples/mistral/chat/chat_prediction.py new file mode 100644 index 00000000..88c57e77 --- /dev/null +++ b/examples/mistral/chat/chat_prediction.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +def main(): + + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + code = """class Cheese(BaseModel): + name: str + type: str + country_of_origin: str + +my_cheese = Cheese(name="Brie", type="Soft", country_of_origin="France") +""" + refactor_prompt = 'Add a "price" property of type float to the Cheese class. Respond only with code, no explanation, no formatting.' 
+
+    chat_response = client.chat.complete(
+        model="codestral-latest",
+        messages=[
+            UserMessage(content=refactor_prompt),
+            UserMessage(content=code)
+        ],
+        # Predicted outputs: the prediction content must be the text we expect
+        # the model to largely reproduce (the original code), NOT the
+        # instruction prompt — passing the prompt here defeats the feature.
+        prediction={
+            "type": "content",
+            "content": code,
+        }
+    )
+    print(chat_response.choices[0].message.content)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/mistral/chat/chat_with_streaming.py b/examples/mistral/chat/chat_with_streaming.py
new file mode 100755
index 00000000..94a3e29c
--- /dev/null
+++ b/examples/mistral/chat/chat_with_streaming.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+import os
+
+from mistralai.client import Mistral
+from mistralai.client.models import UserMessage
+
+
+def main():
+    api_key = os.environ["MISTRAL_API_KEY"]
+    model = "mistral-large-latest"
+
+    client = Mistral(api_key=api_key)
+
+    for chunk in client.chat.stream(
+        model=model,
+        messages=[UserMessage(content="What is the best French cheese?")],
+    ):
+        print(chunk.data.choices[0].delta.content, end="")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/mistral/chat/chatbot_with_streaming.py b/examples/mistral/chat/chatbot_with_streaming.py
new file mode 100755
index 00000000..eae79dcf
--- /dev/null
+++ b/examples/mistral/chat/chatbot_with_streaming.py
@@ -0,0 +1,269 @@
+#!/usr/bin/env python
+
+# Simple chatbot example -- run with -h argument to see options.
+
+import argparse
+import logging
+import os
+import readline
+import sys
+from typing import Any
+
+from mistralai.client import Mistral
+from mistralai.client.models import AssistantMessage, SystemMessage, UserMessage
+
+MODEL_LIST = [
+    "mistral-small-latest",
+    "mistral-medium-latest",
+    "mistral-large-latest",
+    "codestral-latest",
+]
+DEFAULT_MODEL = "mistral-small-latest"
+DEFAULT_TEMPERATURE = 0.7
+LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
+# A dictionary of all commands and their arguments, used for tab completion.
+COMMAND_LIST: dict[str, Any] = { + "/new": {}, + "/help": {}, + "/model": {model: {} for model in MODEL_LIST}, # Nested completions for models + "/system": {}, + "/temperature": {}, + "/config": {}, + "/quit": {}, + "/exit": {}, +} + +logger = logging.getLogger("chatbot") + + +def find_completions(command_dict, parts): + if not parts: + return command_dict.keys() + if parts[0] in command_dict: + return find_completions(command_dict[parts[0]], parts[1:]) + else: + return [cmd for cmd in command_dict if cmd.startswith(parts[0])] + + +def completer(text, state): + buffer = readline.get_line_buffer() + line_parts = buffer.lstrip().split(" ") + options = find_completions(COMMAND_LIST, line_parts[:-1]) + + try: + return [option for option in options if option.startswith(line_parts[-1])][ + state + ] + except IndexError: + return None + + +readline.set_completer(completer) +readline.set_completer_delims(" ") +# Enable tab completion +readline.parse_and_bind("tab: complete") + + +class ChatBot: + def __init__( + self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE + ): + if not api_key: + raise ValueError("An API key must be provided to use the Mistral API.") + self.client = Mistral(api_key=api_key) + self.model = model + self.temperature = temperature + self.system_message = system_message + + def opening_instructions(self): + print( + """ +To chat: type your message and hit enter +To start a new chat: /new +To switch model: /model +To switch system message: /system +To switch temperature: /temperature +To see current config: /config +To exit: /exit, /quit, or hit CTRL+C +To see this help: /help +""" + ) + + def new_chat(self): + print("") + print( + f"Starting new chat with model: {self.model}, temperature: {self.temperature}" + ) + print("") + self.messages = [] + if self.system_message: + self.messages.append(SystemMessage(content=self.system_message)) + + def switch_model(self, input): + model = self.get_arguments(input) + if model in 
MODEL_LIST: + self.model = model + logger.info(f"Switching model: {model}") + else: + logger.error(f"Invalid model name: {model}") + + def switch_system_message(self, input): + system_message = self.get_arguments(input) + if system_message: + self.system_message = system_message + logger.info(f"Switching system message: {system_message}") + self.new_chat() + else: + logger.error(f"Invalid system message: {system_message}") + + def switch_temperature(self, input): + temperature = self.get_arguments(input) + try: + temperature = float(temperature) + if temperature < 0 or temperature > 1: + raise ValueError + self.temperature = temperature + logger.info(f"Switching temperature: {temperature}") + except ValueError: + logger.error(f"Invalid temperature: {temperature}") + + def show_config(self): + print("") + print(f"Current model: {self.model}") + print(f"Current temperature: {self.temperature}") + print(f"Current system message: {self.system_message}") + print("") + + def collect_user_input(self): + print("") + return input("YOU: ") + + def run_inference(self, content): + print("") + print("MISTRAL:") + print("") + + self.messages.append(UserMessage(content=content)) + + assistant_response = "" + logger.debug( + f"Running inference with model: {self.model}, temperature: {self.temperature}" + ) + logger.debug(f"Sending messages: {self.messages}") + for chunk in self.client.chat.stream( + model=self.model, temperature=self.temperature, messages=self.messages + ): + response = chunk.data.choices[0].delta.content + if response is not None: + print(response, end="", flush=True) + assistant_response += response + + print("", flush=True) + + if assistant_response: + self.messages.append(AssistantMessage(content=assistant_response)) + logger.debug(f"Current messages: {self.messages}") + + def get_command(self, input): + return input.split()[0].strip() + + def get_arguments(self, input): + try: + return " ".join(input.split()[1:]) + except IndexError: + return "" + + def 
is_command(self, input): + return self.get_command(input) in COMMAND_LIST + + def execute_command(self, input): + command = self.get_command(input) + if command in ["/exit", "/quit"]: + self.exit() + elif command == "/help": + self.opening_instructions() + elif command == "/new": + self.new_chat() + elif command == "/model": + self.switch_model(input) + elif command == "/system": + self.switch_system_message(input) + elif command == "/temperature": + self.switch_temperature(input) + elif command == "/config": + self.show_config() + + def start(self): + self.opening_instructions() + self.new_chat() + while True: + try: + input = self.collect_user_input() + if self.is_command(input): + self.execute_command(input) + else: + self.run_inference(input) + + except KeyboardInterrupt: + self.exit() + + def exit(self): + logger.debug("Exiting chatbot") + sys.exit(0) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A simple chatbot using the Mistral API" + ) + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key. Defaults to environment variable MISTRAL_API_KEY", + ) + parser.add_argument( + "-m", + "--model", + choices=MODEL_LIST, + default=DEFAULT_MODEL, + help="Model for chat inference. Choices are %(choices)s. Defaults to %(default)s", + ) + parser.add_argument( + "-s", "--system-message", help="Optional system message to prepend." + ) + parser.add_argument( + "-t", + "--temperature", + type=float, + default=DEFAULT_TEMPERATURE, + help="Optional temperature for chat inference. 
Defaults to %(default)s", + ) + parser.add_argument( + "-d", "--debug", action="store_true", help="Enable debug logging" + ) + + args = parser.parse_args() + + if args.debug: + logger.setLevel(logging.DEBUG) + else: + logger.setLevel(logging.INFO) + + formatter = logging.Formatter(LOG_FORMAT) + + ch = logging.StreamHandler() + ch.setFormatter(formatter) + logger.addHandler(ch) + + logger.debug( + f"Starting chatbot with model: {args.model}, " + f"temperature: {args.temperature}, " + f"system message: {args.system_message}" + ) + + try: + bot = ChatBot(args.api_key, args.model, args.system_message, args.temperature) + bot.start() + except Exception as e: + logger.error(e) + sys.exit(1) diff --git a/examples/mistral/chat/completion_with_streaming.py b/examples/mistral/chat/completion_with_streaming.py new file mode 100644 index 00000000..399e8638 --- /dev/null +++ b/examples/mistral/chat/completion_with_streaming.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + prompt = "def fibonacci(n: int):" + suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" + + print(prompt) + for chunk in client.fim.stream( + model="codestral-latest", + prompt=prompt, + suffix=suffix, + ): + print(chunk.data.choices[0].delta.content, end="") + print(suffix) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/chat/function_calling.py b/examples/mistral/chat/function_calling.py new file mode 100644 index 00000000..68e9d91c --- /dev/null +++ b/examples/mistral/chat/function_calling.py @@ -0,0 +1,132 @@ +import functools +import json +import os +from typing import Any + +from mistralai.client import Mistral +from mistralai.client.models import ( + AssistantMessage, + ChatCompletionRequestMessage, + Function, + Tool, + ToolMessage, + UserMessage, +) + +# Assuming we have the following 
data +data: dict[str, list[Any]] = { + "transaction_id": ["T1001", "T1002", "T1003", "T1004", "T1005"], + "customer_id": ["C001", "C002", "C003", "C002", "C001"], + "payment_amount": [125.50, 89.99, 120.00, 54.30, 210.20], + "payment_date": [ + "2021-10-05", + "2021-10-06", + "2021-10-07", + "2021-10-05", + "2021-10-08", + ], + "payment_status": ["Paid", "Unpaid", "Paid", "Paid", "Pending"], +} + + +def retrieve_payment_status(data: dict[str, list[Any]], transaction_id: str) -> str: + for i, r in enumerate(data["transaction_id"]): + if r == transaction_id: + return json.dumps({"status": data["payment_status"][i]}) + return json.dumps({"status": "Error - transaction id not found"}) + + +def retrieve_payment_date(data: dict[str, list[Any]], transaction_id: str) -> str: + for i, r in enumerate(data["transaction_id"]): + if r == transaction_id: + return json.dumps({"date": data["payment_date"][i]}) + return json.dumps({"status": "Error - transaction id not found"}) + + +names_to_functions = { + "retrieve_payment_status": functools.partial(retrieve_payment_status, data=data), + "retrieve_payment_date": functools.partial(retrieve_payment_date, data=data), +} + +tools: list[Tool] = [ + Tool( + function=Function( + name="retrieve_payment_status", + description="Get payment status of a transaction id", + parameters={ + "type": "object", + "required": ["transaction_id"], + "properties": { + "transaction_id": { + "type": "string", + "description": "The transaction id.", + } + }, + }, + ), + ), + Tool( + function=Function( + name="retrieve_payment_date", + description="Get payment date of a transaction id", + parameters={ + "type": "object", + "required": ["transaction_id"], + "properties": { + "transaction_id": { + "type": "string", + "description": "The transaction id.", + } + }, + }, + ), + ), +] + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-small-latest" + +client = Mistral(api_key=api_key) + +messages: list[ChatCompletionRequestMessage] = [ + 
UserMessage(content="What's the status of my transaction?") +] + +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) + +print(response.choices[0].message.content) + +messages.append(AssistantMessage(content=response.choices[0].message.content)) +messages.append(UserMessage(content="My transaction ID is T1001.")) + +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) + +tool_calls = response.choices[0].message.tool_calls +if not tool_calls: + raise RuntimeError("Expected tool calls") +tool_call = tool_calls[0] +function_name = tool_call.function.name +function_params = json.loads(str(tool_call.function.arguments)) + +print(f"calling function_name: {function_name}, with function_params: {function_params}") + +function_result = names_to_functions[function_name](**function_params) + +messages.append( + AssistantMessage( + content=response.choices[0].message.content, + tool_calls=response.choices[0].message.tool_calls, + ) +) +messages.append( + ToolMessage( + name=function_name, + content=function_result, + tool_call_id=tool_call.id, + ) +) +print(messages) + +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) + +print(f"{response.choices[0].message.content}") diff --git a/examples/mistral/chat/json_format.py b/examples/mistral/chat/json_format.py new file mode 100755 index 00000000..8fa1416a --- /dev/null +++ b/examples/mistral/chat/json_format.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral +from mistralai.client.models import UserMessage + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "mistral-large-latest" + + client = Mistral(api_key=api_key) + + chat_response = client.chat.complete( + model=model, + response_format={"type": "json_object"}, + messages=[ + UserMessage( + content="What is the best French cheese? 
Answer shortly in JSON.", + ) + ], + ) + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/chat/structured_outputs.py b/examples/mistral/chat/structured_outputs.py new file mode 100644 index 00000000..64521f46 --- /dev/null +++ b/examples/mistral/chat/structured_outputs.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import os +from pydantic import BaseModel + +from mistralai.client import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + print("Using the .parse method to parse the response into a Pydantic model:\n") + chat_response = client.chat.parse( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration, + ) + print(chat_response.choices[0].message.parsed) + + # Or with the streaming API + print( + "\nUsing the .parse_stream method to stream back the response into a JSON Schema:\n" + ) + with client.chat.parse_stream( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration, + ) as stream: + for chunk in stream: + print(chunk.data.choices[0].delta.content, end="") + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/chat/structured_outputs_with_json_schema.py b/examples/mistral/chat/structured_outputs_with_json_schema.py new file mode 100644 index 00000000..2f99f747 --- /dev/null +++ b/examples/mistral/chat/structured_outputs_with_json_schema.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + print("Using the .complete method to input a raw json schema to the API:\n") + # When providing raw JSON Schema to the SDK you need to have 'additionalProperties': False in the schema definition + # This is because the API is only accepting a strict JSON Schema + chat_response = client.chat.complete( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format={ + "type": "json_schema", + "json_schema": { + "name": "MathDemonstration", + "schema_definition": { + "$defs": { + "Explanation": { + "properties": { + "explanation": { + "title": "Explanation", + "type": "string", + }, + "output": {"title": "Output", "type": "string"}, + }, + "required": ["explanation", "output"], + "title": "Explanation", + "type": "object", + "additionalProperties": False, + } + }, + "properties": { + "steps": { + "items": {"$ref": "#/$defs/Explanation"}, + "title": "Steps", + "type": "array", + }, + "final_answer": {"title": "Final Answer", "type": "string"}, + }, + "required": ["steps", "final_answer"], + "title": "MathDemonstration", + "type": "object", + "additionalProperties": False, + }, + "description": None, + "strict": True, + }, + }, + ) + print(chat_response.choices[0].message.content) + + # Or with the streaming API + with client.chat.stream( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format={ + "type": "json_schema", + "json_schema": { + "name": "MathDemonstration", + "schema_definition": { + "$defs": { + "Explanation": { + "properties": { + "explanation": { + "title": "Explanation", + "type": "string", + }, + "output": {"title": "Output", "type": "string"}, + }, + "required": ["explanation", "output"], + "title": "Explanation", + "type": "object", + "additionalProperties": False, + } + }, + "properties": { + "steps": { + "items": {"$ref": "#/$defs/Explanation"}, + "title": "Steps", + "type": "array", + }, + "final_answer": {"title": "Final Answer", "type": "string"}, + }, + "required": ["steps", "final_answer"], + "title": "MathDemonstration", + "type": "object", + "additionalProperties": False, + }, + "description": None, + "strict": True, + }, + }, + ) as stream: + for chunk in stream: + print(chunk.data.choices[0].delta.content, end="") + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/chat/structured_outputs_with_pydantic.py b/examples/mistral/chat/structured_outputs_with_pydantic.py new file mode 100644 index 00000000..ded9d52d --- /dev/null +++ b/examples/mistral/chat/structured_outputs_with_pydantic.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +import os +from pydantic import BaseModel + +from mistralai.client import Mistral + +from typing import List + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: List[Explanation] + final_answer: str + + print("Using the .parse method to parse the response into a Pydantic model:\n") + chat_response = client.chat.parse( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. 
You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration, + ) + print(chat_response.choices[0].message.parsed) + + # Or with the streaming API + print( + "\nUsing the .parse_stream method to stream back the response into a JSON Schema:\n" + ) + with client.chat.parse_stream( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration, + ) as stream: + for chunk in stream: + print(chunk.data.choices[0].delta.content, end="") + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py new file mode 100644 index 00000000..881f6a69 --- /dev/null +++ b/examples/mistral/classifier/async_classifier.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python + +from pprint import pprint +import asyncio +from mistralai.client import Mistral +from mistralai.client.models import ClassifierFineTuningJob, ClassifierFineTuningJobDetails, ClassifierTrainingParameters, TrainingFile + +import os + + +async def upload_files(client: Mistral, file_names: list[str]) -> list[str]: + # Upload files + print("Uploading files...") + + file_ids = [] + for file_name in file_names: + with open(file_name, "rb") as file: + f = await client.files.upload_async( + file={ + "file_name": file_name, + "content": file.read(), + }, + purpose="fine-tune", + ) + 
file_ids.append(f.id) + print("Files uploaded...") + return file_ids + + +async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str | None: + print("Creating job...") + job = await client.fine_tuning.jobs.create_async( + model="ministral-3b-latest", + job_type="classifier", + training_files=[ + TrainingFile(file_id=training_file_id) + for training_file_id in training_file_ids + ], + hyperparameters=ClassifierTrainingParameters( + learning_rate=0.0001, + ), + auto_start=True, + ) + if not isinstance(job, ClassifierFineTuningJob): + print("Unexpected job type returned") + return None + + print(f"Job created ({job.id})") + + i = 1 + while True: + await asyncio.sleep(10) + detailed_job = await client.fine_tuning.jobs.get_async(job_id=job.id) + if not isinstance(detailed_job, ClassifierFineTuningJobDetails): + raise Exception(f"Unexpected job type: {type(detailed_job)}") + if detailed_job.status not in [ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + ]: + break + print(f"Still training after {i * 10} seconds") + i += 1 + + if detailed_job.status != "SUCCESS": + print("Training failed") + raise Exception(f"Job failed {detailed_job.status}") + + if not detailed_job.fine_tuned_model: + print("No fine-tuned model returned") + return None + print(f"Training succeed: {detailed_job.fine_tuned_model}") + + return detailed_job.fine_tuned_model + + +async def main(): + training_files = ["./examples/fixtures/classifier_sentiments.jsonl"] + client = Mistral( + api_key=os.environ["MISTRAL_API_KEY"], + ) + + training_file_ids: list[str] = await upload_files(client=client, file_names=training_files) + model_name: str | None = await train_classifier(client=client,training_file_ids=training_file_ids) + + if model_name: + print("Calling inference...") + response = client.classifiers.classify( + model=model_name, + inputs=["It's nice", "It's terrible", "Why not"], + ) + print("Inference succeed !") + pprint(response) + + print("Calling 
inference (Chat)...") + response = client.classifiers.classify_chat( + model=model_name, + inputs={"messages": [{"role": "user", "content": "Lame..."}]}, + ) + print("Inference succeed (Chat)!") + pprint(response) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/embeddings/async_embeddings.py b/examples/mistral/embeddings/async_embeddings.py new file mode 100755 index 00000000..413769f3 --- /dev/null +++ b/examples/mistral/embeddings/async_embeddings.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + embeddings_batch_response = await client.embeddings.create_async( + model="mistral-embed", + inputs=["What is the best French cheese?"] * 10, + ) + print(embeddings_batch_response) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/embeddings/embeddings.py b/examples/mistral/embeddings/embeddings.py new file mode 100755 index 00000000..64301ca0 --- /dev/null +++ b/examples/mistral/embeddings/embeddings.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + embeddings_response = client.embeddings.create( + model="mistral-embed", + inputs=["What is the best French cheese?"] * 10, + ) + + print(embeddings_response) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/fim/async_code_completion.py b/examples/mistral/fim/async_code_completion.py new file mode 100644 index 00000000..cb6db241 --- /dev/null +++ b/examples/mistral/fim/async_code_completion.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + prompt = "def 
fibonacci(n: int):" + suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" + + response = await client.fim.complete_async( + model="codestral-latest", + prompt=prompt, + suffix=suffix, + ) + + print( + f""" +{prompt} +{response.choices[0].message.content} +{suffix} +""" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/fim/code_completion.py b/examples/mistral/fim/code_completion.py new file mode 100644 index 00000000..4f25c59c --- /dev/null +++ b/examples/mistral/fim/code_completion.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + prompt = "def fibonacci(n: int):" + suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" + + response = client.fim.complete( + model="codestral-latest", + prompt=prompt, + suffix=suffix, + ) + print( + f""" +{prompt} +{response.choices[0].message.content} +{response.choices[0].message.role} +{suffix} +""" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py new file mode 100644 index 00000000..d2a1679f --- /dev/null +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -0,0 +1,49 @@ +from mistralai.client import Mistral +from mistralai.client.models import BatchRequest, UserMessage +import os +import asyncio + + +async def main(): + client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + + requests = [BatchRequest( + custom_id=str(i), + body=dict( + model="mistral-medium-latest", + messages=[UserMessage( + content=f"What's i + {i}" + )] + ) + ) for i in range(5) + ] + + job = await client.batch.jobs.create_async( + requests=requests, + model="mistral-small-latest", + endpoint="/v1/chat/completions", + metadata={"job_type": "testing"} + ) + 
+ print(f"Created job with ID: {job.id}") + + max_wait = 60 # 1 minute timeout for CI + elapsed = 0 + while job.status not in ["SUCCESS", "FAILED", "CANCELLED"]: + await asyncio.sleep(1) + elapsed += 1 + if elapsed >= max_wait: + print(f"Timeout after {max_wait}s, job still {job.status}") + return + job = await client.batch.jobs.get_async(job_id=job.id) + print(f"Job status: {job.status}") + + print(f"Job is done, status {job.status}") + if job.outputs: + for res in job.outputs: + print(res["response"]["body"]) + else: + print(f"No outputs (succeeded: {job.succeeded_requests}, failed: {job.failed_requests})") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_files.py b/examples/mistral/jobs/async_files.py new file mode 100644 index 00000000..4bec5237 --- /dev/null +++ b/examples/mistral/jobs/async_files.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import File + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # Create a new file + created_file = await client.files.upload_async( + file=File( + file_name="training_file.jsonl", + content=open("examples/fixtures/ft_training_file.jsonl", "rb").read(), + ) + ) + print(created_file) + + # List files + files = await client.files.list_async() + print(files) + + # Retrieve a file + retrieved_file = await client.files.retrieve_async(file_id=created_file.id) + print(retrieved_file) + + # Delete a file + deleted_file = await client.files.delete_async(file_id=created_file.id) + print(deleted_file) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_fine_tuning.py b/examples/mistral/jobs/async_fine_tuning.py new file mode 100644 index 00000000..080dbe03 --- /dev/null +++ b/examples/mistral/jobs/async_fine_tuning.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import asyncio +import os + +from 
mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # Create new files + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: + training_file = await client.files.upload_async( + file=File(file_name="file.jsonl", content=f) + ) + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: + validation_file = await client.files.upload_async( + file=File(file_name="validation_file.jsonl", content=f) + ) + + # Create a new job + created_job = await client.fine_tuning.jobs.create_async( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + validation_files=[validation_file.id], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + ), + ) + print(created_job) + + # List jobs + jobs = await client.fine_tuning.jobs.list_async(page=0, page_size=5) + print(jobs) + + # Retrieve a job + retrieved_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) + print(retrieved_job) + + # Cancel a job + canceled_job = await client.fine_tuning.jobs.cancel_async(job_id=created_job.id) + print(canceled_job) + + # Delete files + await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_fine_tuning_chat.py b/examples/mistral/jobs/async_fine_tuning_chat.py new file mode 100644 index 00000000..f170fed4 --- /dev/null +++ b/examples/mistral/jobs/async_fine_tuning_chat.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +import asyncio +import json +import os +import random +from pathlib import Path + +from mistralai.client import Mistral +from mistralai.client.models import ( + File, + CompletionTrainingParametersIn, +) + +POLLING_INTERVAL = 10 + +cwd = 
Path(__file__).parent + +user_contents = [ + "How far is the Moon from Earth?", + "What's the largest ocean on Earth?", + "How many continents are there?", + "What's the powerhouse of the cell?", + "What's the speed of light?", + "Can you solve a Rubik's Cube?", + "What is the tallest mountain in the world?", + "Who painted the Mona Lisa?", +] + +# List of assistant contents +assistant_contents = [ + "Around 384,400 kilometers. Give or take a few, like that really matters.", + "The Pacific Ocean. You know, the one that covers more than 60 million square miles. No big deal.", + "There are seven continents. I hope that wasn't too hard to count.", + "The mitochondria. Remember that from high school biology?", + "Approximately 299,792 kilometers per second. You know, faster than your internet speed.", + "I could if I had hands. What's your excuse?", + "Mount Everest, standing at 29,029 feet. You know, just a little hill.", + "Leonardo da Vinci. Just another guy who liked to doodle.", +] + +system_message = "Marv is a factual chatbot that is also sarcastic" + +def create_validation_file() -> bytes: + return json.dumps({ + "messages": [ + {"role": "user", "content": "How long does it take to travel around the Earth?"}, + {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. 
For you, depends on your mode of transportation."} + ], + "temperature": random.random() + }).encode() + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + requests = [] + for um, am in zip( + random.sample(user_contents, len(user_contents)), + random.sample(assistant_contents, len(assistant_contents)), + ): + requests.append(json.dumps({ + "messages": [ + {"role": "system", "content": system_message}, + {"role": "user", "content": um}, + {"role": "assistant", "content": am}, + ] + })) + + # Create new files + training_file = await client.files.upload_async( + file=File( + file_name="file.jsonl", content=("\n".join(requests)).encode() + ), + purpose="fine-tune", + ) + + validation_file = await client.files.upload_async( + file=File( + file_name="validation_file.jsonl", content=create_validation_file() + ), + purpose="fine-tune", + ) + # Create a new job + created_job = await client.fine_tuning.jobs.create_async( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + validation_files=[validation_file.id], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + ), + ) + + while created_job.status in ["RUNNING", "STARTED", "QUEUED", "VALIDATING", "VALIDATED"]: + created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) + print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") + await asyncio.sleep(POLLING_INTERVAL) + + if created_job.status == "FAILED": + print("Job failed") + raise Exception(f"Job failed with {created_job.status}") + + print(created_job) + # Chat with model + response = await client.chat.complete_async( + model=created_job.fine_tuned_model, + messages=[ + { + "role": "system", + "content": "Marv is a factual chatbot that is also sarcastic.", + }, + {"role": "user", "content": "What is the capital of France ?"}, + ], + ) + + print(response.choices[0].message.content) + + # Delete files 
+ await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) + + # Delete fine-tuned model + await client.models.delete_async(model_id=created_job.fine_tuned_model) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py new file mode 100644 index 00000000..f209507d --- /dev/null +++ b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +import asyncio +import json +import os +from typing import List + +import httpx +from pydantic import BaseModel, Field + +from mistralai.client import Mistral +from mistralai.extra import response_format_from_pydantic_model +from mistralai.client.models import File + +SAMPLE_PDF_URL = "https://arxiv.org/pdf/2401.04088" + + +class Table(BaseModel): + name: str = Field(description="The name or title of the table") + + +class TableExtraction(BaseModel): + tables: List[Table] = Field(description="List of tables found in the document") + + +def create_ocr_batch_request(custom_id: str, document_url: str) -> dict: + """Batch requests require custom_id and body wrapper.""" + response_format = response_format_from_pydantic_model(TableExtraction) + return { + "custom_id": custom_id, + "body": { + "document": {"type": "document_url", "document_url": document_url}, + "document_annotation_format": response_format.model_dump( + by_alias=True, exclude_none=True + ), + "pages": [0, 1, 2, 3, 4, 5, 6, 7], + "include_image_base64": False, + }, + } + + +async def main(): + client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + + document_urls = [SAMPLE_PDF_URL] + + batch_requests = [ + json.dumps(create_ocr_batch_request(custom_id=str(i), document_url=url)) + for i, url in enumerate(document_urls) + ] + batch_content = "\n".join(batch_requests) + + print("Uploading batch 
file...") + batch_file = await client.files.upload_async( + file=File(file_name="ocr_batch.jsonl", content=batch_content.encode()), + purpose="batch", + ) + print(f"Batch file uploaded: {batch_file.id}") + + print("Creating batch job...") + created_job = await client.batch.jobs.create_async( + model="mistral-ocr-latest", + input_files=[batch_file.id], + endpoint="/v1/ocr", + ) + print(f"Batch job created: {created_job.id}") + + print("Waiting for job completion...") + job = await client.batch.jobs.get_async(job_id=created_job.id) + while job.status not in ["SUCCESS", "FAILED", "CANCELLED"]: + print(f"Status: {job.status}") + await asyncio.sleep(5) + job = await client.batch.jobs.get_async(job_id=created_job.id) + + print(f"Job status: {job.status}") + + async with httpx.AsyncClient() as http_client: + if job.output_file: + signed_url = await client.files.get_signed_url_async( + file_id=job.output_file + ) + response = await http_client.get(signed_url.url) + for line in response.content.decode().strip().split("\n"): + result = json.loads(line) + annotation = result["response"]["body"].get("document_annotation") + if annotation: + tables = TableExtraction.model_validate_json(annotation) + for table in tables.tables: + print(table.name) + + if job.error_file: + signed_url = await client.files.get_signed_url_async(file_id=job.error_file) + response = await http_client.get(signed_url.url) + print("Errors:", response.content.decode()) + + print("\nCleaning up...") + await client.files.delete_async(file_id=batch_file.id) + print("Done!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/files.py b/examples/mistral/jobs/files.py new file mode 100644 index 00000000..50f6472c --- /dev/null +++ b/examples/mistral/jobs/files.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral +from mistralai.client.models import File + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = 
Mistral(api_key=api_key) + + # Create a new file + created_file = client.files.upload( + file=File( + file_name="training_file.jsonl", + content=open("examples/fixtures/ft_training_file.jsonl", "rb").read(), + ) + ) + print(created_file) + + # List files + files = client.files.list() + print(files) + + # Retrieve a file + retrieved_file = client.files.retrieve(file_id=created_file.id) + print(retrieved_file) + + # Delete a file + deleted_file = client.files.delete(file_id=created_file.id) + print(deleted_file) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/jobs/fine_tuning.py b/examples/mistral/jobs/fine_tuning.py new file mode 100644 index 00000000..2d155cc2 --- /dev/null +++ b/examples/mistral/jobs/fine_tuning.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +import os + +from mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # Create new files + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: + training_file = client.files.upload( + file=File(file_name="file.jsonl", content=f) + ) + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: + validation_file = client.files.upload( + file=File(file_name="validation_file.jsonl", content=f) + ) + + # Create a new job + created_job = client.fine_tuning.jobs.create( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + validation_files=[validation_file.id], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + ), + ) + print(created_job) + + # List jobs + jobs = client.fine_tuning.jobs.list(page=0, page_size=5) + print(jobs) + + # Retrieve a job + retrieved_job = client.fine_tuning.jobs.get(job_id=created_job.id) + print(retrieved_job) + + # Cancel a job + canceled_job = client.fine_tuning.jobs.cancel(job_id=created_job.id) + 
print(canceled_job) + + # Delete files + client.files.delete(file_id=training_file.id) + client.files.delete(file_id=validation_file.id) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/jobs/fine_tuning_dry_run.py b/examples/mistral/jobs/fine_tuning_dry_run.py new file mode 100644 index 00000000..d0c6f733 --- /dev/null +++ b/examples/mistral/jobs/fine_tuning_dry_run.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import CompletionTrainingParametersIn + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # Create new files + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: + training_file = await client.files.upload_async( + file={"file_name": "test-file.jsonl", "content": f} + ) + + # Create a new job + dry_run_job = await client.fine_tuning.jobs.create_async( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + warmup_fraction=0.01, + ), + auto_start=False, + ) + + print("Dry run job created") + print(f"Job ID: {dry_run_job}") + print(f"Train tokens: {dry_run_job.trained_tokens}") + print(f"Dataset tokens: {dry_run_job.metadata.data_tokens}") + print(f"Epochs number: {dry_run_job.hyperparameters.epochs}") + print(f"Expected duration: {dry_run_job.metadata.expected_duration_seconds}") + print(f"Cost: {dry_run_job.metadata.cost} {dry_run_job.metadata.cost_currency}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/libraries/async_libraries.py b/examples/mistral/libraries/async_libraries.py new file mode 100644 index 00000000..fc5e6541 --- /dev/null +++ b/examples/mistral/libraries/async_libraries.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +import os +import asyncio + +from mistralai.client import Mistral +from 
mistralai.client.models import File + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # create new library + library = await client.beta.libraries.create_async(name="My API Library") + print(library) + + # Upload a new file + uploaded_file = await client.beta.libraries.documents.upload_async( + library_id=library.id, + file=File( + file_name="lorem_ipsum.md", + content=open("examples/fixtures/lorem_ipsum.md", "rb").read(), + ) + ) + print(uploaded_file) + + # List files + files = (await client.beta.libraries.documents.list_async(library_id=library.id)).data + print(files) + + # Retrieve a file + retrieved_file = await client.beta.libraries.documents.get_async(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file) + + # Retrieve a file content + retrieved_file_content = await client.beta.libraries.documents.text_content_async(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file_content) + + + # Rename a file + renamed_file = await client.beta.libraries.documents.update_async(library_id=library.id, document_id=uploaded_file.id, name="renamed_file.md") + print(renamed_file) + + # Delete a file + deleted_file = await client.beta.libraries.documents.delete_async(library_id=library.id, document_id=uploaded_file.id) + print(deleted_file) + + # Delete a library + deleted_library = await client.beta.libraries.delete_async(library_id=library.id) + print(deleted_library) + + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/libraries/libraries.py b/examples/mistral/libraries/libraries.py new file mode 100644 index 00000000..8e4b2998 --- /dev/null +++ b/examples/mistral/libraries/libraries.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral +from mistralai.client.models import File + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # create new library 
+ library = client.beta.libraries.create(name="My API Library") + print(library) + + # Upload a new file + uploaded_file = client.beta.libraries.documents.upload( + library_id=library.id, + file=File( + file_name="lorem_ipsum.md", + content=open("examples/fixtures/lorem_ipsum.md", "rb").read(), + ) + ) + print(uploaded_file) + + # List files + files = client.beta.libraries.documents.list(library_id=library.id).data + print(files) + + # Retrieve a file + retrieved_file = client.beta.libraries.documents.get(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file) + + # Retrieve a file content + retrieved_file_content = client.beta.libraries.documents.text_content(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file_content) + + + # Rename a file + renamed_file = client.beta.libraries.documents.update(library_id=library.id, document_id=uploaded_file.id, name="renamed_file.md") + print(renamed_file) + + # Delete a file + deleted_file = client.beta.libraries.documents.delete(library_id=library.id, document_id=uploaded_file.id) + print(deleted_file) + + # Delete a library + deleted_library = client.beta.libraries.delete(library_id=library.id) + print(deleted_library) + + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/mcp_servers/sse_server.py b/examples/mistral/mcp_servers/sse_server.py new file mode 100644 index 00000000..66edb98b --- /dev/null +++ b/examples/mistral/mcp_servers/sse_server.py @@ -0,0 +1,32 @@ +import random +import threading +from contextlib import contextmanager + +from mcp.server.fastmcp import FastMCP +import logging + +logging.basicConfig(level=logging.ERROR) + +# Initialize FastMCP server +mcp = FastMCP("weather") + + +@mcp.tool() +async def get_weather(location: str) -> float: + return random.random() * 30 + + +def run_sse_server(): + mcp.run(transport="sse") + + +@contextmanager +def run_sse_server_in_background(): + """start the server in a new thread""" + thread = 
threading.Thread(target=run_sse_server, daemon=True) + thread.start() + yield thread + + +if __name__ == "__main__": + run_sse_server() diff --git a/examples/mistral/mcp_servers/stdio_server.py b/examples/mistral/mcp_servers/stdio_server.py new file mode 100644 index 00000000..f95ac8dc --- /dev/null +++ b/examples/mistral/mcp_servers/stdio_server.py @@ -0,0 +1,21 @@ +import random +from mcp.server.fastmcp import FastMCP +import logging + +logging.basicConfig(level=logging.ERROR) + +# Initialize FastMCP server +mcp = FastMCP("weather") + + +@mcp.tool() +async def get_weather(location: str) -> float: + return random.random() * 30 + + +def run_stdio_server(): + mcp.run(transport="stdio") + + +if __name__ == "__main__": + run_stdio_server() diff --git a/examples/mistral/models/async_list_models.py b/examples/mistral/models/async_list_models.py new file mode 100755 index 00000000..8b1ac503 --- /dev/null +++ b/examples/mistral/models/async_list_models.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + list_models_response = await client.models.list_async() + print(list_models_response) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/models/list_models.py b/examples/mistral/models/list_models.py new file mode 100755 index 00000000..9b68f806 --- /dev/null +++ b/examples/mistral/models/list_models.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + list_models_response = client.models.list() + print(list_models_response) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/ocr/ocr_process_from_file.py b/examples/mistral/ocr/ocr_process_from_file.py new file mode 100644 index 00000000..9368ceeb --- /dev/null +++ 
b/examples/mistral/ocr/ocr_process_from_file.py @@ -0,0 +1,44 @@ +from mistralai.client import Mistral +import os +import json +from pathlib import Path +import urllib.request + +MIXTRAL_OF_EXPERTS_PDF_URL = "https://arxiv.org/pdf/2401.04088" +MOE_FILENAME = "mixtral_of_experts.pdf" + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + pdf_file = Path(MOE_FILENAME) + + # Download the file if it doesn't exist + if not pdf_file.is_file(): + urllib.request.urlretrieve(MIXTRAL_OF_EXPERTS_PDF_URL, MOE_FILENAME) + + # Upload the file + uploaded_file = client.files.upload( + file={ + "file_name": pdf_file.stem, + "content": pdf_file.read_bytes(), + }, + purpose="ocr", + ) + + pdf_response = client.ocr.process(document={ + "type": "file", + "file_id": uploaded_file.id, + }, model="mistral-ocr-latest", include_image_base64=True) + + # Print the parsed PDF + response_dict = json.loads(pdf_response.model_dump_json()) + json_string = json.dumps(response_dict, indent=4) + print(json_string) + + # Remove the file + pdf_file.unlink() + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/ocr/ocr_process_from_url.py b/examples/mistral/ocr/ocr_process_from_url.py new file mode 100644 index 00000000..4f3b0224 --- /dev/null +++ b/examples/mistral/ocr/ocr_process_from_url.py @@ -0,0 +1,31 @@ +import json +import os + +from mistralai.client import Mistral + +MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825.pdf" + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Using an URL + pdf_response = client.ocr.process( + document={ + "document_url": MISTRAL_7B_PDF_URL, + "type": "document_url", + "document_name": "mistral-7b-pdf", + }, + model="mistral-ocr-latest", + include_image_base64=True, + ) + + # Print the parsed PDF + response_dict = json.loads(pdf_response.model_dump_json()) + json_string 
= json.dumps(response_dict, indent=4) + print(json_string) + + +if __name__ == "__main__": + main() diff --git a/packages/azure/.genignore b/packages/azure/.genignore new file mode 100644 index 00000000..6bdf6621 --- /dev/null +++ b/packages/azure/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/azure/client/sdk.py +src/mistralai/azure/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/azure/.gitattributes b/packages/azure/.gitattributes new file mode 100644 index 00000000..4d75d590 --- /dev/null +++ b/packages/azure/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/packages/azure/.gitignore b/packages/azure/.gitignore new file mode 100644 index 00000000..b386de74 --- /dev/null +++ b/packages/azure/.gitignore @@ -0,0 +1,15 @@ +.env +.env.local +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ +.speakeasy/reports +README-PYPI.md +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock new file mode 100644 index 00000000..ad85f076 --- /dev/null +++ b/packages/azure/.speakeasy/gen.lock @@ -0,0 +1,866 @@ +lockVersion: 2.0.0 +id: dc40fa48-2c4d-46ad-ac8b-270749770f34 +management: + docChecksum: be10e9d3b72d49632e8d6d311b9b8193 + docVersion: 1.0.0 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0rc1 + configChecksum: b06d8b151630e956c8b778cdc6c54c06 + repoURL: https://github.com/mistralai/client-python.git + repoSubDirectory: packages/azure + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure + published: true +persistentEdits: + generation_id: f76a86de-7619-48b9-8987-e2ecf1378ceb + pristine_commit_hash: 
8b77e997fc0b0da49806cf251be6f273cf32cdfb + pristine_tree_hash: 2ff5921bc10f855310c77650bdc4622a57fc63fb +features: + python: + additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 + constsAndDefaults: 1.0.7 + core: 6.0.12 + defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 + envVarSecurityUsage: 0.3.2 + examples: 3.0.2 + flatRequests: 1.0.1 + globalSecurity: 3.0.5 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.2.0 + includes: 3.0.0 + methodArguments: 1.0.2 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.1.4 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + 
pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:00453565d70739471a4e1872c93b5b7e66fe6cb6 + pristine_git_object: f8715cd0a335c6dc0fda4b60400f11c4aa8a0a06 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:7233a19b12f3204b8e2259a4a09d0d9726609e4e + pristine_git_object: cc82a8c707268084865f86d71be82de5ebf6f821 + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + 
pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 + docs/models/document.md: + id: cd1d2a444370 + last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 + pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 + docs/models/documenturlchunk.md: + id: 48437d297408 + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 + docs/models/filechunk.md: + id: edc076728e9d + last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 + pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c + docs/models/format_.md: + id: a17c22228eda + 
last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 + pristine_git_object: 97d286a4ed7cff0a4058bbfa06c4573428182876 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 
+ last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 + last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + last_write_checksum: sha1:091077fedf1b699d5160a21fe352056c247ef988 + pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:eefa8ad80773e00ac297f3cf806704ac6ac3557d + pristine_git_object: 2d26c19fd1cecb234d7fb761dd73cc0a59e622ad + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrtableobject.md: + id: c967796380e6 + last_write_checksum: sha1:3b78858cc130fc8792ec3d149c8f657fd3f7a4c3 + pristine_git_object: 4e27697c15983f86274648b2d7bacac557081630 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/security.md: 
+ id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/tableformat.md: + id: d8cd08c55c3c + last_write_checksum: sha1:e0736ea9576466d71821aa1e67fc632cc5a85414 + pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + 
docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 + 
scripts/prepare_readme.py: + id: e0c5957a6035 + last_write_checksum: sha1:26b29aad3c23a98912fd881698c976aac55749fe + pristine_git_object: 2b2577ea83873f64aa9f91d9d762bc6e1f250977 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/azure/client/__init__.py: + id: 5624bda9196d + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 + src/mistralai/azure/client/_hooks/__init__.py: + id: 850c237217cb + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai/azure/client/_hooks/sdkhooks.py: + id: e9923767446c + last_write_checksum: sha1:ae162d6e73be0eb767c353c815d76b034395d50f + pristine_git_object: 2080681b7f2c52fcb80dcb95eff48654763e6258 + src/mistralai/azure/client/_hooks/types.py: + id: 07c892e06527 + last_write_checksum: sha1:fde2e0f6da6930232b67682009de520724b23398 + pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 + src/mistralai/azure/client/_version.py: + id: a77160e60e5d + last_write_checksum: sha1:79494d1fb83ebffd4ad7b285299d4fea4e5a4e92 + pristine_git_object: 4d15bdc8f877a1616c6d1f978037f76e0487bd6c + src/mistralai/azure/client/basesdk.py: + id: 5a585a95ec21 + last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74 + pristine_git_object: 0d4d9a440e6c7726b6bc7fc6525aa3dc009847eb + src/mistralai/azure/client/chat.py: + id: c18454e628d7 + last_write_checksum: sha1:884e22b0e313662c67cec7101765d8d7ef0bc48a + pristine_git_object: 1051f9527851894988f7e1689923575cf72a0896 + src/mistralai/azure/client/errors/__init__.py: + id: f377703514d9 + last_write_checksum: sha1:36c516c11f8083c3380a72c1d0f0718a3345f24b + pristine_git_object: 79e2712c2e62121fb6dbaab15ca8487f0e16b07c + src/mistralai/azure/client/errors/httpvalidationerror.py: + id: 
c3ec0ad923e9 + last_write_checksum: sha1:f45b41c1ad980c5d481158209bf23fa795cc68bc + pristine_git_object: b4f2691e630a095ff09fbbce5e2ea3063592084f + src/mistralai/azure/client/errors/mistralazureerror.py: + id: fae868afae89 + last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c + pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + src/mistralai/azure/client/errors/no_response_error.py: + id: b838df044e62 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/azure/client/errors/responsevalidationerror.py: + id: 77ac5e93cdda + last_write_checksum: sha1:c1e045dbdda0199bc1d563819c0b38e877d0efef + pristine_git_object: 02397334d2b3bf2516808b69b2548564f650cbe0 + src/mistralai/azure/client/errors/sdkerror.py: + id: dfdd4b1d8928 + last_write_checksum: sha1:edc2baf6feb199e1b1ff1aad681622b44804299d + pristine_git_object: c4f3616cd2720a9b5d2a2c5b2d22a305629ebbe6 + src/mistralai/azure/client/httpclient.py: + id: 60c81037fbd0 + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai/azure/client/models/__init__.py: + id: "335011330e21" + last_write_checksum: sha1:07054ca95df60a3f03d8ea37a361aa506f94b78b + pristine_git_object: 908dda32cebe894b37dccaaa9b84db174ac93c21 + src/mistralai/azure/client/models/assistantmessage.py: + id: 353ed9110f97 + last_write_checksum: sha1:973979ac03f86f26ee9a540aaaa8f70a7011daca + pristine_git_object: e9ae6e82c3c758561c8c9663f27b2fd7e38d2911 + src/mistralai/azure/client/models/chatcompletionchoice.py: + id: 6942c7db5891 + last_write_checksum: sha1:817bfda6120a98248322c308629e404081e01279 + pristine_git_object: 67b5ba694217f4f3b95589d7f84af6a9bea9802d + src/mistralai/azure/client/models/chatcompletionrequest.py: + id: 0c711c870184 + last_write_checksum: sha1:ffdd11a4945dd805c9a73328749c2f4d9b6f80e6 + pristine_git_object: 
edd0fdc74a1b81f458d6083e79dc393e488da36a + src/mistralai/azure/client/models/chatcompletionresponse.py: + id: bdfacf065e9e + last_write_checksum: sha1:c72fb624e7475a551d37e0b291b64bcf772c402a + pristine_git_object: d41f9c6fab670cf7c961f50b1302f9a88cf48162 + src/mistralai/azure/client/models/chatcompletionstreamrequest.py: + id: da00a7feb4ef + last_write_checksum: sha1:8bb36693fed73a50d59687ca8b30a2c443708610 + pristine_git_object: 2edfbed98462eab43f322b9c706721365e410bb9 + src/mistralai/azure/client/models/completionchunk.py: + id: 28d620f25510 + last_write_checksum: sha1:84d1c55ef7bdb438e7f536a604a070799d054281 + pristine_git_object: 0e64bbc8aa0293c9d763db56287f296909260c38 + src/mistralai/azure/client/models/completionevent.py: + id: a6f00a747933 + last_write_checksum: sha1:3d04bfbdaf11c52af5613ed0fd70c8dbc59f6d49 + pristine_git_object: c4b272871d9b3ea8443f469d29b0825706c25c00 + src/mistralai/azure/client/models/completionresponsestreamchoice.py: + id: 3ba5d7ba8a13 + last_write_checksum: sha1:4de311509c71c8f582b2c767febea89f1acd341a + pristine_git_object: 20a271401ff98d69525947ab929078af83aab1f1 + src/mistralai/azure/client/models/contentchunk.py: + id: 1f65e4f8f731 + last_write_checksum: sha1:cf11e1f061d3c8af040ebbdba0b25d4177e1cea4 + pristine_git_object: 17efcc7d5825461576cf61257908688cffd23eb7 + src/mistralai/azure/client/models/deltamessage.py: + id: b7dab1d158de + last_write_checksum: sha1:190c2809d575244eda5efbb1e00a4ec5811aea29 + pristine_git_object: 567e772fc1b376efaec1a2dfd660bc74a916f8ee + src/mistralai/azure/client/models/documenturlchunk.py: + id: e56fec6e977f + last_write_checksum: sha1:0313d94f343d46dac7cc3adc392feaf06fa2b2a4 + pristine_git_object: 2dea80056f6752bdaa5d00f391cb6f54371a9d2b + src/mistralai/azure/client/models/filechunk.py: + id: 150d9f180110 + last_write_checksum: sha1:6d12d630a5bfd601836f9cb3d63b9eb2f15f880d + pristine_git_object: 6baa0cba81535e157c0f81ae2648362f7bd1adbd + src/mistralai/azure/client/models/function.py: + id: 
6d1e2011a14b + last_write_checksum: sha1:b064eca9256966603581d41b5b2c08cd2448224d + pristine_git_object: 055d3657fd98da63b80deb8cd2054e95a0e66a2b + src/mistralai/azure/client/models/functioncall.py: + id: ced560a1bd57 + last_write_checksum: sha1:490cb3a0305994de063e06fa4c77defa911271f3 + pristine_git_object: d476792ccbb5aa2002deb870f1c81cc1500f59d4 + src/mistralai/azure/client/models/functionname.py: + id: 6f09474ebc85 + last_write_checksum: sha1:651ceed24416ce8192f70db03cc5cd0db685899f + pristine_git_object: 839e0d557a902da6c819210962e38e1df9bda90f + src/mistralai/azure/client/models/imagedetail.py: + id: de211988043d + last_write_checksum: sha1:812f2ec4fc0d8d13db643ed49192384d5a841aa4 + pristine_git_object: 2d074cee614e1c49b69ee4073c3aaaa7a5a2c9e2 + src/mistralai/azure/client/models/imageurl.py: + id: c8882341c798 + last_write_checksum: sha1:8c3c08cc5d33c66b12539270b7edbf157d936f86 + pristine_git_object: bcb4fe43d334752be501d694543250d7e632a9c7 + src/mistralai/azure/client/models/imageurlchunk.py: + id: b6f0abb574d7 + last_write_checksum: sha1:417618d9d2aba85386a100dfe818d13342830526 + pristine_git_object: 7213c49846a4107271d017dd695648d98c2efa94 + src/mistralai/azure/client/models/jsonschema.py: + id: bfd486f4bb18 + last_write_checksum: sha1:ccb2b53bd2351ec5119d9a7914a1a42c2746a096 + pristine_git_object: 99f2fb8903562465687edfd300d8efd373b92247 + src/mistralai/azure/client/models/mistralpromptmode.py: + id: d0028b1e4129 + last_write_checksum: sha1:46fe1ab8ac2d5867877368a59a4aa5be2fabadeb + pristine_git_object: 26e7adbdc4a981c92d51b72542c966b0ba0fb8f8 + src/mistralai/azure/client/models/ocrimageobject.py: + id: 9c9f987d94bb + last_write_checksum: sha1:423effee97a4120a26ba78c2abe7f6adeb5c733d + pristine_git_object: a23515b346a0f9517fec0b2381e1b0c04cb31816 + src/mistralai/azure/client/models/ocrpagedimensions.py: + id: 7669a25f32b3 + last_write_checksum: sha1:60642db6bb61f0e96204fb78d3aa0bd80dd0a7e5 + pristine_git_object: 12858da92de99aa6da9d6e148df3ba7ee37496c7 + 
src/mistralai/azure/client/models/ocrpageobject.py: + id: eea193b05126 + last_write_checksum: sha1:b8370ac0611dc3eccf09dddf85d1c39d3a11224b + pristine_git_object: 434c8988f124f93180e6cefa15b3aee067937946 + src/mistralai/azure/client/models/ocrrequest.py: + id: 365a5b4776a2 + last_write_checksum: sha1:e684da1b6db18cb9c5ce95b9cc58556e05a9ea9b + pristine_git_object: a2cd341593c9db3644076d39352abca6815efc56 + src/mistralai/azure/client/models/ocrresponse.py: + id: b8cde8c16a4c + last_write_checksum: sha1:55e81631f6fe57aaf58178460e1c5fc69fa19377 + pristine_git_object: 3dc09fd770a064e69e84519bd0f0c9127ebd8176 + src/mistralai/azure/client/models/ocrtableobject.py: + id: c2cd51b8789e + last_write_checksum: sha1:86a8fd2241cf6a636e81e58484a90bdb7880085e + pristine_git_object: f1de5428a71f9d42cd9f9e764d0bbf88f3aad8cc + src/mistralai/azure/client/models/ocrusageinfo.py: + id: 5e9118cac468 + last_write_checksum: sha1:97887b58cfe6ebd9ebd5905c6c7485525d6dc788 + pristine_git_object: f63315d23a1659aee4333b45c4239861aa5220d7 + src/mistralai/azure/client/models/prediction.py: + id: bd6abfa93083 + last_write_checksum: sha1:07d06d5629af183f999e043744a67868ef779bcc + pristine_git_object: 1fa1d78248628ccdc102ce0631d344150addfd2d + src/mistralai/azure/client/models/referencechunk.py: + id: c9612f854670 + last_write_checksum: sha1:e81e758e00db915e68f58ffa1e03b2c473f64477 + pristine_git_object: f7af9bf9a73e0d782e5e6c6a7866af6fbc3668d8 + src/mistralai/azure/client/models/responseformat.py: + id: c124e7c316aa + last_write_checksum: sha1:d368a2d4320356b6daab1dd0c62c6c862e902ca0 + pristine_git_object: 20fd2b868506cff278d1d7dc719eddd56ea538b0 + src/mistralai/azure/client/models/responseformats.py: + id: fef416cefcd4 + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai/azure/client/models/security.py: + id: 4a2e4760ec08 + last_write_checksum: sha1:0cd2ae54cecd88cfd8d43e92c0d3da7efa48942c + 
pristine_git_object: 9b83ba98336090bed89fbeda40b4a07b212a1106 + src/mistralai/azure/client/models/systemmessage.py: + id: 8fa0dee9e4e1 + last_write_checksum: sha1:26167db704ece6ef1391d6f474e00f417bff4639 + pristine_git_object: d4bd004476ef653798295fa5df9de68b607f0132 + src/mistralai/azure/client/models/systemmessagecontentchunks.py: + id: 5918e770869d + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b + src/mistralai/azure/client/models/textchunk.py: + id: 9c81c76a6325 + last_write_checksum: sha1:28b8f4e030d365e5bf2f2f2720a7919b29616564 + pristine_git_object: 9295148588a143278ff5f48f9142347e35cfdab2 + src/mistralai/azure/client/models/thinkchunk.py: + id: df6bbd55b3eb + last_write_checksum: sha1:752a81be169fdd7a6afc293cf090b2cd4d2b22c9 + pristine_git_object: 4e881aad3b11d43aecaab922fe55bf7b4076c42f + src/mistralai/azure/client/models/tool.py: + id: 4075ef72c086 + last_write_checksum: sha1:4bef6d64b6426fdeff5031557c3c0e37f5c33b9a + pristine_git_object: 87329bdb73526120a3f63d48299114485a7fe038 + src/mistralai/azure/client/models/toolcall.py: + id: c65e6f79e539 + last_write_checksum: sha1:a3b36214b4533b79868630348762206a0e5ca26e + pristine_git_object: ada1ea65136fa58dce55f2857d895ea916bcd41f + src/mistralai/azure/client/models/toolchoice.py: + id: c25062b5de34 + last_write_checksum: sha1:6212c9366eb3b4f4062c86c00d4502dd03bf5ce1 + pristine_git_object: ddb9e1417c880c44a7f0505bfde839570fa3cd4a + src/mistralai/azure/client/models/toolchoiceenum.py: + id: cc06ba3a8d21 + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai/azure/client/models/toolmessage.py: + id: 84ac736fa955 + last_write_checksum: sha1:e4ed14906985fe74fd76a9adb09125ebc1218a1f + pristine_git_object: 670210de0d05b52ee9dffbbb808a87e67c2d37a9 + src/mistralai/azure/client/models/tooltypes.py: + id: fa881b046d34 + last_write_checksum: 
sha1:cd28ddc02fff9a5abbb59c82fe9e0dcbdb9b6d2a + pristine_git_object: 1cce7446f2772b998208ea1c78c7969e3881d5d0 + src/mistralai/azure/client/models/usageinfo.py: + id: 3edc9c81b329 + last_write_checksum: sha1:0ac2350e4efa1ed3ffd7d33ac91c3ef564d1d773 + pristine_git_object: 0f04c87c97ff3148106408a46618c848b86c4b37 + src/mistralai/azure/client/models/usermessage.py: + id: 3796508adc07 + last_write_checksum: sha1:8eb35fb07971d74cf2cb0858c037558f52df6aa9 + pristine_git_object: 549b01ca887651a95c5efc8aff3372d32dfdc277 + src/mistralai/azure/client/models/validationerror.py: + id: f2b84813e2ea + last_write_checksum: sha1:f0f9706a5af2ac4f6b234e768fdd492bbdd8a18c + pristine_git_object: 817ecf7a56470369ccacd0f5e0bb739656a5f92c + src/mistralai/azure/client/ocr.py: + id: 5817c10c9297 + last_write_checksum: sha1:7666ca9f4596cee080952b2f4096bd4176051680 + pristine_git_object: b9270f6a52406d8a9bf02d90c24ae540da6dfb9d + src/mistralai/azure/client/py.typed: + id: e88369f116d2 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/azure/client/sdkconfiguration.py: + id: 602f74633eed + last_write_checksum: sha1:163fe779949725d81181f39b70d6922fc2cb8099 + pristine_git_object: 919225f9bf2e4315f879f0da6c7f8b3e6157bd58 + src/mistralai/azure/client/types/__init__.py: + id: f79033f78412 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai/azure/client/types/basemodel.py: + id: fd244927c80c + last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 + pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + src/mistralai/azure/client/utils/__init__.py: + id: 26f1a707325b + last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 + src/mistralai/azure/client/utils/annotations.py: + id: bb1f6c189fdb + last_write_checksum: 
sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai/azure/client/utils/datetimes.py: + id: 2b7db09ee0ab + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/azure/client/utils/dynamic_imports.py: + id: 0ac779c122d9 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 + src/mistralai/azure/client/utils/enums.py: + id: ffbdb1917a68 + last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d + pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + src/mistralai/azure/client/utils/eventstreaming.py: + id: bdc37b70360c + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b + src/mistralai/azure/client/utils/forms.py: + id: 51696122c557 + last_write_checksum: sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d + src/mistralai/azure/client/utils/headers.py: + id: e42840c8cb13 + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai/azure/client/utils/logger.py: + id: 9db88755a137 + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai/azure/client/utils/metadata.py: + id: 44f85bd3b2e2 + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai/azure/client/utils/queryparams.py: + id: ec1c03114156 + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai/azure/client/utils/requestbodies.py: + id: 1030c47d624d + 
last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 + pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + src/mistralai/azure/client/utils/retries.py: + id: d50ed6e400b2 + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e + src/mistralai/azure/client/utils/security.py: + id: 1d35741ce5f1 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed + src/mistralai/azure/client/utils/serializers.py: + id: a1f26d73c3ad + last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 + pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/azure/client/utils/unions.py: + id: 9abcc9913e3f + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e + src/mistralai/azure/client/utils/unmarshal_json_response.py: + id: 947f4fc4db62 + last_write_checksum: sha1:75931131ff498a66a48cfb32dd9d5d61f2c9b4d1 + pristine_git_object: fe0c9b8ecabf8f89e363a050837582df40d67fb4 + src/mistralai/azure/client/utils/url.py: + id: 4976c88d0e3b + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai/azure/client/utils/values.py: + id: 3974a1553447 + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "azureai", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "azureai", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}} + responses: + "200": + application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"type": "document_url", "document_url": "https://upset-labourer.net/"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} +examplesVersion: 1.0.2 +generatedTests: {} +generatedFiles: + - .gitattributes + - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/document.md + - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md + - 
docs/models/filechunk.md + - docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/jsonschema.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/mistralpromptmode.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrusageinfo.md + - docs/models/prediction.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md + - docs/models/textchunk.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - py.typed + - pylintrc + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai_azure/__init__.py + - src/mistralai_azure/_hooks/__init__.py + - src/mistralai_azure/_hooks/sdkhooks.py + - src/mistralai_azure/_hooks/types.py + - src/mistralai_azure/_version.py + - src/mistralai_azure/basesdk.py + - src/mistralai_azure/chat.py + - src/mistralai_azure/httpclient.py + - 
src/mistralai_azure/models/__init__.py + - src/mistralai_azure/models/assistantmessage.py + - src/mistralai_azure/models/chatcompletionchoice.py + - src/mistralai_azure/models/chatcompletionrequest.py + - src/mistralai_azure/models/chatcompletionresponse.py + - src/mistralai_azure/models/chatcompletionstreamrequest.py + - src/mistralai_azure/models/completionchunk.py + - src/mistralai_azure/models/completionevent.py + - src/mistralai_azure/models/completionresponsestreamchoice.py + - src/mistralai_azure/models/contentchunk.py + - src/mistralai_azure/models/deltamessage.py + - src/mistralai_azure/models/documenturlchunk.py + - src/mistralai_azure/models/filechunk.py + - src/mistralai_azure/models/function.py + - src/mistralai_azure/models/functioncall.py + - src/mistralai_azure/models/functionname.py + - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/imageurl.py + - src/mistralai_azure/models/imageurlchunk.py + - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/mistralazureerror.py + - src/mistralai_azure/models/mistralpromptmode.py + - src/mistralai_azure/models/no_response_error.py + - src/mistralai_azure/models/ocrimageobject.py + - src/mistralai_azure/models/ocrpagedimensions.py + - src/mistralai_azure/models/ocrpageobject.py + - src/mistralai_azure/models/ocrrequest.py + - src/mistralai_azure/models/ocrresponse.py + - src/mistralai_azure/models/ocrusageinfo.py + - src/mistralai_azure/models/prediction.py + - src/mistralai_azure/models/referencechunk.py + - src/mistralai_azure/models/responseformat.py + - src/mistralai_azure/models/responseformats.py + - src/mistralai_azure/models/responsevalidationerror.py + - src/mistralai_azure/models/sdkerror.py + - src/mistralai_azure/models/security.py + - src/mistralai_azure/models/systemmessage.py + - src/mistralai_azure/models/systemmessagecontentchunks.py + - src/mistralai_azure/models/textchunk.py + - src/mistralai_azure/models/thinkchunk.py + - 
src/mistralai_azure/models/tool.py + - src/mistralai_azure/models/toolcall.py + - src/mistralai_azure/models/toolchoice.py + - src/mistralai_azure/models/toolchoiceenum.py + - src/mistralai_azure/models/toolmessage.py + - src/mistralai_azure/models/tooltypes.py + - src/mistralai_azure/models/usageinfo.py + - src/mistralai_azure/models/usermessage.py + - src/mistralai_azure/models/validationerror.py + - src/mistralai_azure/ocr.py + - src/mistralai_azure/py.typed + - src/mistralai_azure/sdkconfiguration.py + - src/mistralai_azure/types/__init__.py + - src/mistralai_azure/types/basemodel.py + - src/mistralai_azure/utils/__init__.py + - src/mistralai_azure/utils/annotations.py + - src/mistralai_azure/utils/datetimes.py + - src/mistralai_azure/utils/enums.py + - src/mistralai_azure/utils/eventstreaming.py + - src/mistralai_azure/utils/forms.py + - src/mistralai_azure/utils/headers.py + - src/mistralai_azure/utils/logger.py + - src/mistralai_azure/utils/metadata.py + - src/mistralai_azure/utils/queryparams.py + - src/mistralai_azure/utils/requestbodies.py + - src/mistralai_azure/utils/retries.py + - src/mistralai_azure/utils/security.py + - src/mistralai_azure/utils/serializers.py + - src/mistralai_azure/utils/unmarshal_json_response.py + - src/mistralai_azure/utils/url.py + - src/mistralai_azure/utils/values.py diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml new file mode 100644 index 00000000..518e1e25 --- /dev/null +++ b/packages/azure/.speakeasy/gen.yaml @@ -0,0 +1,88 @@ +configVersion: 2.0.0 +generation: + sdkClassName: MistralAzure + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + sdkInitStyle: constructor + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + nameResolutionFeb2025: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + securityFeb2025: true + sharedErrorComponentsApr2025: true + sharedNestedComponentsJan2026: true + 
nameOverrideFeb2026: true + methodSignaturesApr2024: true + auth: + oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + versioningStrategy: automatic + persistentEdits: {} + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false +python: + version: 2.0.0rc1 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + main: {} + allowedRedefinedBuiltins: + - id + - object + - input + - dir + asyncMode: both + authors: + - Mistral + baseErrorName: MistralAzureError + clientServerStatusCodesAsErrors: true + constFieldCasing: normal + defaultErrorName: SDKError + description: Python Client SDK for the Mistral AI API in Azure. + enableCustomCodeRegions: false + enumFormat: union + fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true + responseRequiredSep2024: true + flatAdditionalProperties: true + flattenGlobalSecurity: true + flattenRequests: true + flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only + imports: + option: openapi + paths: + callbacks: "" + errors: errors + operations: "" + shared: "" + webhooks: "" + inferUnionDiscriminators: true + inputModelSuffix: input + license: "" + maxMethodParams: 999 + methodArguments: infer-optional-args + moduleName: mistralai.azure.client + multipartArrayFormat: standard + outputModelSuffix: output + packageManager: uv + packageName: mistralai-azure + preApplyUnionDiscriminators: true + pytestFilterWarnings: [] + pytestTimeout: 0 + responseFormat: flat + sseFlatResponse: false + templateVersion: v2 + useAsyncHooks: false diff --git a/packages/azure/CONTRIBUTING.md b/packages/azure/CONTRIBUTING.md new file mode 100644 index 00000000..d585717f --- /dev/null +++ b/packages/azure/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank 
you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. + +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! + +The Maintainers diff --git a/packages/azure/README.md b/packages/azure/README.md new file mode 100644 index 00000000..6eff040f --- /dev/null +++ b/packages/azure/README.md @@ -0,0 +1,464 @@ +# Mistral on Azure Python Client + +## SDK Installation + +PIP +```bash +pip install mistralai +``` + +UV +```bash +uv add mistralai +``` + +**Prerequisites** + +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). 
+ + +## SDK Example Usage + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically injects the `api-version` query parameter. + +```python +# Synchronous Example +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +async def main(): + # The SDK automatically injects api-version as a query parameter + s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + res = await s.chat.complete_async( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + ) + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + + + +## Available Resources and Operations + +### [chat](docs/sdks/chat/README.md) + +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion + + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. 
+ +```python +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://wiki.python.org/moin/Generators + + + +## Retries + +Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. 
+ +To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: +```python +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + retries=RetryConfig( + "backoff", + BackoffStrategy(1, 50, 1.1, 100), + False + ), +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: +```python +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ["AZURE_API_VERSION"] + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Error Handling + +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +from mistralai.azure.client import MistralAzure +from mistralai.azure.client import models +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = None +try: + res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + ) + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Override Server URL Per-Client + +For Azure, you must provide your Azure AI Foundry endpoint via `server_url`. 
The SDK automatically injects the `api-version` query parameter:
+```python
+from mistralai.azure.client import MistralAzure
+import os
+
+s = MistralAzure(
+    api_key=os.environ["AZURE_API_KEY"],
+    server_url=os.environ["AZURE_ENDPOINT"],
+    api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"),
+)
+
+res = s.chat.stream(
+    messages=[
+        {
+            "role": "user",
+            "content": "Who is the best French painter? Answer in one short sentence.",
+        },
+    ],
+    model=os.environ["AZURE_MODEL"],
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+
+
+## Custom HTTP Client
+
+The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance.
+Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocols that ensure the client has the necessary methods to make API calls.
+This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly.
+ +For example, you could specify a header for every request that this sdk makes as follows: +```python +from mistralai.azure.client import MistralAzure +import httpx +import os + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), + client=http_client, +) +``` + +or you could wrap the client with your own custom logic: +```python +from typing import Any, Optional, Union +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, 
+ content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = MistralAzure( + api_key="", + server_url="", + async_client=CustomClient(httpx.AsyncClient()), +) +``` + + + +## Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | +| --------- | ---- | ----------- | +| `api_key` | http | HTTP Bearer | + +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. You must also provide `server_url` pointing to your Azure AI Foundry endpoint. The SDK automatically injects the `api-version` query parameter: +```python +from mistralai.azure.client import MistralAzure +import os + +s = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=os.environ["AZURE_MODEL"], +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/azure/RELEASES.md b/packages/azure/RELEASES.md new file mode 100644 index 00000000..2090c67a --- /dev/null +++ b/packages/azure/RELEASES.md @@ -0,0 +1,31 @@ + + +## 2026-01-12 15:30:32 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] packages/mistralai_azure +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai_azure/1.7.0 - packages/mistralai_azure + +## 2026-02-25 17:39:51 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/azure +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure + +## 2026-03-02 13:15:00 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0rc1] packages/azure +### Releases +- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai-azure/2.0.0rc1 - packages/azure \ No newline at end of file diff --git a/packages/azure/USAGE.md b/packages/azure/USAGE.md new file mode 100644 index 00000000..a4bc5147 --- /dev/null +++ b/packages/azure/USAGE.md @@ -0,0 +1,70 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically injects the `api-version` query parameter. 
+ +```python +# Synchronous Example +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +async def main(): + # The SDK automatically injects api-version as a query parameter + s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model=AZURE_MODEL) + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/azure/docs/errors/httpvalidationerror.md b/packages/azure/docs/errors/httpvalidationerror.md new file mode 100644 index 00000000..712a148c --- /dev/null +++ b/packages/azure/docs/errors/httpvalidationerror.md @@ -0,0 +1,8 @@ +# HTTPValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/arguments.md b/packages/azure/docs/models/arguments.md new file mode 100644 index 00000000..2e54e27e --- /dev/null +++ b/packages/azure/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str 
= /* values here */ +``` + diff --git a/packages/azure/docs/models/assistantmessage.md b/packages/azure/docs/models/assistantmessage.md new file mode 100644 index 00000000..9ef63837 --- /dev/null +++ b/packages/azure/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/packages/azure/docs/models/assistantmessagecontent.md b/packages/azure/docs/models/assistantmessagecontent.md new file mode 100644 index 00000000..047b7cf9 --- /dev/null +++ b/packages/azure/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionchoice.md b/packages/azure/docs/models/chatcompletionchoice.md new file mode 100644 index 00000000..deaa0ea0 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionchoicefinishreason.md b/packages/azure/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 00000000..b2f15ecb --- /dev/null +++ b/packages/azure/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## Values + +| Name | 
Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionrequest.md b/packages/azure/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..f8715cd0 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionrequest.md @@ -0,0 +1,26 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionrequestmessage.md b/packages/azure/docs/models/chatcompletionrequestmessage.md new file mode 100644 index 00000000..91e9e062 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionrequeststop.md b/packages/azure/docs/models/chatcompletionrequeststop.md new file mode 100644 index 00000000..749296d4 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..dc82a8ef --- /dev/null +++ b/packages/azure/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestToolChoice + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionresponse.md b/packages/azure/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..a0465ffb --- /dev/null +++ b/packages/azure/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` 
| *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionstreamrequest.md b/packages/azure/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..cc82a8c7 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,26 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionstreamrequestmessage.md b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md new file mode 100644 index 00000000..2e4e93ac --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionstreamrequeststop.md b/packages/azure/docs/models/chatcompletionstreamrequeststop.md new file mode 100644 index 00000000..a48460a9 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..43f3ca38 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestToolChoice + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/azure/docs/models/completionchunk.md b/packages/azure/docs/models/completionchunk.md new file mode 100644 index 00000000..7f8ab5e6 --- /dev/null +++ b/packages/azure/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | 
+| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/completionevent.md b/packages/azure/docs/models/completionevent.md new file mode 100644 index 00000000..7a66e8fe --- /dev/null +++ b/packages/azure/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/completionresponsestreamchoice.md b/packages/azure/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/azure/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | 
[models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md new file mode 100644 index 00000000..0fece473 --- /dev/null +++ b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md @@ -0,0 +1,11 @@ +# CompletionResponseStreamChoiceFinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/azure/docs/models/contentchunk.md b/packages/azure/docs/models/contentchunk.md new file mode 100644 index 00000000..22023e8b --- /dev/null +++ b/packages/azure/docs/models/contentchunk.md @@ -0,0 +1,23 @@ +# ContentChunk + + +## Supported Types + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + diff --git a/packages/azure/docs/models/deltamessage.md b/packages/azure/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/azure/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/deltamessagecontent.md b/packages/azure/docs/models/deltamessagecontent.md new file mode 100644 index 00000000..8142772d --- /dev/null +++ b/packages/azure/docs/models/deltamessagecontent.md @@ -0,0 +1,17 @@ +# DeltaMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/azure/docs/models/document.md b/packages/azure/docs/models/document.md new file mode 100644 index 00000000..509d43b7 --- /dev/null +++ b/packages/azure/docs/models/document.md @@ -0,0 +1,25 @@ +# Document + +Document to run OCR on + + +## Supported Types + +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/packages/azure/docs/models/documenturlchunk.md b/packages/azure/docs/models/documenturlchunk.md new file mode 100644 index 00000000..9dbfbe50 --- /dev/null +++ b/packages/azure/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_url` 
| *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/packages/azure/docs/models/filechunk.md b/packages/azure/docs/models/filechunk.md new file mode 100644 index 00000000..18217114 --- /dev/null +++ b/packages/azure/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["file"]]* | :heavy_minus_sign: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/format_.md b/packages/azure/docs/models/format_.md new file mode 100644 index 00000000..97d286a4 --- /dev/null +++ b/packages/azure/docs/models/format_.md @@ -0,0 +1,11 @@ +# Format + +Format of the table + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/packages/azure/docs/models/function.md b/packages/azure/docs/models/function.md new file mode 100644 index 00000000..b2bdb3fe --- /dev/null +++ b/packages/azure/docs/models/function.md @@ -0,0 +1,11 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/functioncall.md b/packages/azure/docs/models/functioncall.md new file mode 100644 index 00000000..7ccd90dc --- /dev/null +++ b/packages/azure/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## 
Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/functionname.md b/packages/azure/docs/models/functionname.md new file mode 100644 index 00000000..87d7b485 --- /dev/null +++ b/packages/azure/docs/models/functionname.md @@ -0,0 +1,10 @@ +# FunctionName + +this restriction of `Function` is used to select a specific function to call + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/imagedetail.md b/packages/azure/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/azure/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurl.md b/packages/azure/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/packages/azure/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | 
[OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurlchunk.md b/packages/azure/docs/models/imageurlchunk.md new file mode 100644 index 00000000..db0c53d2 --- /dev/null +++ b/packages/azure/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurlunion.md b/packages/azure/docs/models/imageurlunion.md new file mode 100644 index 00000000..db97130f --- /dev/null +++ b/packages/azure/docs/models/imageurlunion.md @@ -0,0 +1,17 @@ +# ImageURLUnion + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/azure/docs/models/jsonschema.md b/packages/azure/docs/models/jsonschema.md new file mode 100644 index 00000000..7ff7c070 --- /dev/null +++ b/packages/azure/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No 
newline at end of file diff --git a/packages/azure/docs/models/loc.md b/packages/azure/docs/models/loc.md new file mode 100644 index 00000000..d6094ac2 --- /dev/null +++ b/packages/azure/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/packages/azure/docs/models/mistralpromptmode.md b/packages/azure/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..c3409d03 --- /dev/null +++ b/packages/azure/docs/models/mistralpromptmode.md @@ -0,0 +1,12 @@ +# MistralPromptMode + +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrimageobject.md b/packages/azure/docs/models/ocrimageobject.md new file mode 100644 index 00000000..3c0d5544 --- /dev/null +++ b/packages/azure/docs/models/ocrimageobject.md @@ -0,0 +1,14 @@ +# OCRImageObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Image ID for extracted image in a page | +| `top_left_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of top-left corner of the extracted image | +| `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | +| `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right 
corner of the extracted image | +| `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | +| `image_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Annotation of the extracted image in json str | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrpagedimensions.md b/packages/azure/docs/models/ocrpagedimensions.md new file mode 100644 index 00000000..c93ca64d --- /dev/null +++ b/packages/azure/docs/models/ocrpagedimensions.md @@ -0,0 +1,10 @@ +# OCRPageDimensions + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `dpi` | *int* | :heavy_check_mark: | Dots per inch of the page-image | +| `height` | *int* | :heavy_check_mark: | Height of the image in pixels | +| `width` | *int* | :heavy_check_mark: | Width of the image in pixels | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrpageobject.md b/packages/azure/docs/models/ocrpageobject.md new file mode 100644 index 00000000..02473d44 --- /dev/null +++ b/packages/azure/docs/models/ocrpageobject.md @@ -0,0 +1,15 @@ +# OCRPageObject + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | +| `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | +| `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of 
all extracted images in the page | +| `tables` | List[[models.OCRTableObject](../models/ocrtableobject.md)] | :heavy_minus_sign: | List of all extracted tables in the page | +| `hyperlinks` | List[*str*] | :heavy_minus_sign: | List of all hyperlinks in the page | +| `header` | *OptionalNullable[str]* | :heavy_minus_sign: | Header of the page | +| `footer` | *OptionalNullable[str]* | :heavy_minus_sign: | Footer of the page | +| `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrrequest.md b/packages/azure/docs/models/ocrrequest.md new file mode 100644 index 00000000..2d26c19f --- /dev/null +++ b/packages/azure/docs/models/ocrrequest.md @@ -0,0 +1,20 @@ +# OCRRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrresponse.md b/packages/azure/docs/models/ocrresponse.md new file mode 100644 index 00000000..0a309317 --- /dev/null +++ b/packages/azure/docs/models/ocrresponse.md @@ -0,0 +1,11 @@ +# OCRResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| +| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrtableobject.md b/packages/azure/docs/models/ocrtableobject.md new file mode 100644 index 00000000..4e27697c --- /dev/null +++ b/packages/azure/docs/models/ocrtableobject.md @@ -0,0 +1,10 @@ +# OCRTableObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Table ID for extracted table in a page | +| `content` | *str* | :heavy_check_mark: | Content of the table in the given format | +| `format_` | [models.Format](../models/format_.md) | :heavy_check_mark: | Format of the table | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrusageinfo.md b/packages/azure/docs/models/ocrusageinfo.md new file mode 100644 index 00000000..d9d79125 --- /dev/null +++ b/packages/azure/docs/models/ocrusageinfo.md @@ -0,0 +1,9 @@ +# OCRUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `pages_processed` | *int* | :heavy_check_mark: | Number of pages processed | +| `doc_size_bytes` | *OptionalNullable[int]* | :heavy_minus_sign: | Document size in bytes | \ No newline at end of file diff --git a/packages/azure/docs/models/prediction.md b/packages/azure/docs/models/prediction.md new file mode 100644 index 00000000..fae3c1ca --- /dev/null +++ b/packages/azure/docs/models/prediction.md @@ -0,0 +1,11 @@ +# Prediction + +Enable users to specify an expected completion, optimizing response times by leveraging known or 
predictable content. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/referencechunk.md b/packages/azure/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/azure/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/responseformat.md b/packages/azure/docs/models/responseformat.md new file mode 100644 index 00000000..5cab22f2 --- /dev/null +++ b/packages/azure/docs/models/responseformat.md @@ -0,0 +1,11 @@ +# ResponseFormat + +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/responseformats.md b/packages/azure/docs/models/responseformats.md new file mode 100644 index 00000000..2f5f1e55 --- /dev/null +++ b/packages/azure/docs/models/responseformats.md @@ -0,0 +1,10 @@ +# ResponseFormats + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/azure/docs/models/security.md b/packages/azure/docs/models/security.md new file mode 100644 index 00000000..c698674c --- /dev/null +++ b/packages/azure/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/systemmessage.md b/packages/azure/docs/models/systemmessage.md new file mode 100644 index 00000000..10bda10f --- /dev/null +++ b/packages/azure/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | 
---------------------------------------------------------------- | +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/systemmessagecontent.md b/packages/azure/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..0c87baf3 --- /dev/null +++ b/packages/azure/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.SystemMessageContentChunks]` + +```python +value: List[models.SystemMessageContentChunks] = /* values here */ +``` + diff --git a/packages/azure/docs/models/systemmessagecontentchunks.md b/packages/azure/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/packages/azure/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/packages/azure/docs/models/tableformat.md b/packages/azure/docs/models/tableformat.md new file mode 100644 index 00000000..54f029b8 --- /dev/null +++ b/packages/azure/docs/models/tableformat.md @@ -0,0 +1,9 @@ +# TableFormat + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/packages/azure/docs/models/textchunk.md b/packages/azure/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/azure/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ 
| ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/thinkchunk.md b/packages/azure/docs/models/thinkchunk.md new file mode 100644 index 00000000..b07f598e --- /dev/null +++ b/packages/azure/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| \ No newline at end of file diff --git a/packages/azure/docs/models/thinking.md b/packages/azure/docs/models/thinking.md new file mode 100644 index 00000000..c7a0d5c9 --- /dev/null +++ b/packages/azure/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/packages/azure/docs/models/tool.md b/packages/azure/docs/models/tool.md new file mode 100644 index 00000000..fb661f72 --- /dev/null +++ b/packages/azure/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/toolcall.md b/packages/azure/docs/models/toolcall.md new file mode 100644 index 00000000..3819236b --- /dev/null +++ b/packages/azure/docs/models/toolcall.md @@ -0,0 +1,11 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at 
end of file diff --git a/packages/azure/docs/models/toolchoice.md b/packages/azure/docs/models/toolchoice.md new file mode 100644 index 00000000..373046bb --- /dev/null +++ b/packages/azure/docs/models/toolchoice.md @@ -0,0 +1,11 @@ +# ToolChoice + +ToolChoice is either a ToolChoiceEnum or a ToolChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/azure/docs/models/toolchoiceenum.md b/packages/azure/docs/models/toolchoiceenum.md new file mode 100644 index 00000000..0be3d6c5 --- /dev/null +++ b/packages/azure/docs/models/toolchoiceenum.md @@ -0,0 +1,11 @@ +# ToolChoiceEnum + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | +| `REQUIRED` | required | \ No newline at end of file diff --git a/packages/azure/docs/models/toolmessage.md b/packages/azure/docs/models/toolmessage.md new file mode 100644 index 00000000..7201481e --- /dev/null +++ b/packages/azure/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| 
`role` | *Literal["tool"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/toolmessagecontent.md b/packages/azure/docs/models/toolmessagecontent.md new file mode 100644 index 00000000..5c76091f --- /dev/null +++ b/packages/azure/docs/models/toolmessagecontent.md @@ -0,0 +1,17 @@ +# ToolMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/azure/docs/models/tooltypes.md b/packages/azure/docs/models/tooltypes.md new file mode 100644 index 00000000..84e49253 --- /dev/null +++ b/packages/azure/docs/models/tooltypes.md @@ -0,0 +1,8 @@ +# ToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/packages/azure/docs/models/usageinfo.md b/packages/azure/docs/models/usageinfo.md new file mode 100644 index 00000000..f5204ac9 --- /dev/null +++ b/packages/azure/docs/models/usageinfo.md @@ -0,0 +1,12 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/usermessage.md 
b/packages/azure/docs/models/usermessage.md new file mode 100644 index 00000000..e7a932ed --- /dev/null +++ b/packages/azure/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/usermessagecontent.md b/packages/azure/docs/models/usermessagecontent.md new file mode 100644 index 00000000..8350f9e8 --- /dev/null +++ b/packages/azure/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/azure/docs/models/utils/retryconfig.md b/packages/azure/docs/models/utils/retryconfig.md new file mode 100644 index 00000000..69dd549e --- /dev/null +++ b/packages/azure/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. 
| `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. + +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. | `300000` | \ No newline at end of file diff --git a/packages/azure/docs/models/validationerror.md b/packages/azure/docs/models/validationerror.md new file mode 100644 index 00000000..7a1654a1 --- /dev/null +++ b/packages/azure/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/sdks/chat/README.md b/packages/azure/docs/sdks/chat/README.md new file mode 100644 index 00000000..560ffa83 --- /dev/null +++ b/packages/azure/docs/sdks/chat/README.md @@ -0,0 +1,141 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [stream](#stream) - Stream chat completion +* [complete](#complete) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.stream(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## complete + +Chat Completion + +### Example Usage + +```python +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + # handle response + print(res.choices[0].message.content) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results.
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/azure/docs/sdks/mistralazure/README.md b/packages/azure/docs/sdks/mistralazure/README.md new file mode 100644 index 00000000..381000a8 --- /dev/null +++ b/packages/azure/docs/sdks/mistralazure/README.md @@ -0,0 +1,9 @@ +# MistralAzure SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + +### Available Operations + diff --git a/packages/azure/py.typed b/packages/azure/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/packages/azure/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. 
diff --git a/packages/azure/pylintrc b/packages/azure/pylintrc new file mode 100644 index 00000000..0391ac11 --- /dev/null +++ b/packages/azure/pylintrc @@ -0,0 +1,664 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. 
+fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. 
+recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. 
If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id, + n + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. 
+module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. 
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# List of regular expressions of class ancestor names to ignore when counting
+# public methods (see R0903)
+exclude-too-few-public-methods=
+
+# List of qualified class names to ignore when counting class parents (see
+# R0901)
+ignored-parents=
+
+# Maximum number of arguments for function / method.
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr=5
+
+# Maximum number of branch for function / method body.
+max-branches=12
+
+# Maximum number of locals for function / method body.
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=25
+
+# Maximum number of return / yield for function / method body.
+max-returns=6
+
+# Maximum number of statements in function / method body.
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when caught.
+overgeneral-exceptions=builtins.BaseException,builtins.Exception
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module.
+max-module-lines=1000
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. 
+confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import, + too-many-return-statements, + redefined-builtin + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). 
See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. 
+reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. 
+contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. 
+signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object,input,dir + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/azure/pyproject.toml b/packages/azure/pyproject.toml new file mode 100644 index 00000000..000dd558 --- /dev/null +++ b/packages/azure/pyproject.toml @@ -0,0 +1,65 @@ +[project] +name = "mistralai-azure" +version = "2.0.0rc1" +description = "Python Client SDK for the Mistral AI API in Azure." 
+authors = [{ name = "Mistral" }] +requires-python = ">=3.10" +readme = "README.md" +dependencies = [ + "httpcore >=1.0.9", + "httpx >=0.28.1", + "pydantic >=2.11.2", +] + +[dependency-groups] +dev = [ + "mypy==1.15.0", + "pylint==3.2.3", + "pyright>=1.1.401,<2", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", +] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/azure/client/py.typed"] + +[tool.hatch.build.targets.sdist] +include = ["src/mistralai"] + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai/azure/client/py.typed" = "src/mistralai/azure/client/py.typed" + +[tool.hatch.build.targets.wheel] +include = ["src/mistralai"] + +[tool.hatch.build.targets.wheel.sources] +"src" = "" + +[virtualenvs] +in-project = true + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true + +[tool.pyright] +venvPath = "." +venv = ".venv" diff --git a/packages/azure/scripts/prepare_readme.py b/packages/azure/scripts/prepare_readme.py new file mode 100644 index 00000000..2b2577ea --- /dev/null +++ b/packages/azure/scripts/prepare_readme.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r", encoding="utf-8") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + REPO_SUBDIR = "packages/azure" + # Ensure the subdirectory has a trailing slash + if not REPO_SUBDIR.endswith("/"): + REPO_SUBDIR += "/" + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + REPO_SUBDIR + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w", encoding="utf-8") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as ie: + print("Failed to copy README.md to README-PYPI.md") + print(ie) diff --git a/packages/azure/scripts/publish.sh b/packages/azure/scripts/publish.sh new file mode 100755 index 00000000..c35748f3 --- /dev/null +++ b/packages/azure/scripts/publish.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +uv run python scripts/prepare_readme.py + +uv build +uv publish --token $PYPI_TOKEN diff --git a/packages/azure/src/mistralai/azure/client/__init__.py b/packages/azure/src/mistralai/azure/client/__init__.py new file mode 100644 index 00000000..833c68cd --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/__init__.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) +from .sdk import * +from .sdkconfiguration import * + + +VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/azure/src/mistralai/azure/client/_hooks/__init__.py b/packages/azure/src/mistralai/azure/client/_hooks/__init__.py new file mode 100644 index 00000000..2ee66cdd --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/packages/azure/src/mistralai/azure/client/_hooks/registration.py b/packages/azure/src/mistralai/azure/client/_hooks/registration.py new file mode 100644 index 00000000..d5a49cc3 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_hooks/registration.py @@ -0,0 +1,12 @@ +from .types import Hooks + + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. 
+ + +def init_hooks(_hooks: Hooks) -> None: + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" diff --git a/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py new file mode 100644 index 00000000..2080681b --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.azure.client.httpclient import HttpClient + + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request( + self, 
hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/packages/azure/src/mistralai/azure/client/_hooks/types.py b/packages/azure/src/mistralai/azure/client/_hooks/types.py new file mode 100644 index 00000000..3e4e3955 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_hooks/types.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from abc import ABC, abstractmethod +import httpx +from mistralai.azure.client.httpclient import HttpClient +from mistralai.azure.client.sdkconfiguration import SDKConfiguration +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + config: SDKConfiguration + base_url: str + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__( + self, + config: SDKConfiguration, + base_url: str, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): + self.config = config + self.base_url = base_url + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> 
Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py new file mode 100644 index 00000000..4d15bdc8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai-azure" +__version__: str = "2.0.0rc1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai-azure" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/packages/azure/src/mistralai/azure/client/basesdk.py b/packages/azure/src/mistralai/azure/client/basesdk.py new file mode 100644 index 00000000..0d4d9a44 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/basesdk.py @@ -0,0 +1,384 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai.azure.client import errors, utils +from mistralai.azure.client._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) +from mistralai.azure.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, + run_sync_in_thread, +) +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse + + +class BaseSDK: + sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. + """ + + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: + self.sdk_configuration = sdk_config + self.parent_ref = parent_ref + + def _get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def _build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + 
accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self._get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if 
request_has_query_params else None, + allow_empty_value, + ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). + parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody() + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + 
def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + async def do(): + http_res = None + try: + req = await run_sync_in_thread( + 
hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None + ) + + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/packages/azure/src/mistralai/azure/client/chat.py b/packages/azure/src/mistralai/azure/client/chat.py new file mode 100644 index 00000000..1051f952 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/chat.py @@ -0,0 +1,713 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.azure.client import errors, models, utils +from mistralai.azure.client._hooks import HookContext +from mistralai.azure.client.types import OptionalNullable, UNSET +from mistralai.azure.client.utils import eventstreaming +from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + def stream( + self, + *, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = 
None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + 
accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + messages: Union[ + 
List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. 
+ :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def complete( + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + 
Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
+ :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = 
None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
+ :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + 
prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + 
+ raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/azure/src/mistralai/azure/client/errors/__init__.py b/packages/azure/src/mistralai/azure/client/errors/__init__.py new file mode 100644 index 00000000..79e2712c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/__init__.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .mistralazureerror import MistralAzureError +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralAzureError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py new file mode 100644 index 00000000..b4f2691e --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from dataclasses import dataclass, field +import httpx +from mistralai.azure.client.errors import MistralAzureError +from mistralai.azure.client.models import validationerror as models_validationerror +from mistralai.azure.client.types import BaseModel +from typing import List, Optional + + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[models_validationerror.ValidationError]] = None + + +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralAzureError): + data: HTTPValidationErrorData = field(hash=False) + + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py b/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py new file mode 100644 index 00000000..c5bf1752 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass, field + + +@dataclass(unsafe_hash=True) +class MistralAzureError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) + + def __str__(self): + return self.message diff --git a/packages/azure/src/mistralai/azure/client/errors/no_response_error.py b/packages/azure/src/mistralai/azure/client/errors/no_response_error.py new file mode 100644 index 00000000..1deab64b --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/no_response_error.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + object.__setattr__(self, "message", message) + super().__init__(message) + + def __str__(self): + return self.message diff --git a/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py new file mode 100644 index 00000000..02397334 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.azure.client.errors import MistralAzureError + + +@dataclass(unsafe_hash=True) +class ResponseValidationError(MistralAzureError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/packages/azure/src/mistralai/azure/client/errors/sdkerror.py b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py new file mode 100644 index 00000000..c4f3616c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.azure.client.errors import MistralAzureError + +MAX_MESSAGE_LEN = 10_000 + + +@dataclass(unsafe_hash=True) +class SDKError(MistralAzureError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + + if message: + message += ": " + message += f"Status {raw_response.status_code}" + + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" + + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" + + message += f". Body: {body_display}" + message = message.strip() + + super().__init__(message, raw_response, body) diff --git a/packages/azure/src/mistralai/azure/client/httpclient.py b/packages/azure/src/mistralai/azure/client/httpclient.py new file mode 100644 index 00000000..89560b56 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/httpclient.py @@ -0,0 +1,125 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +# pyright: reportReturnType = false +import asyncio +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + def close(self) -> None: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: 
Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + async def aclose(self) -> None: + pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + try: + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) + except RuntimeError: + try: + asyncio.run(async_client.aclose()) + except RuntimeError: + # best effort + pass diff --git a/packages/azure/src/mistralai/azure/client/models/__init__.py b/packages/azure/src/mistralai/azure/client/models/__init__.py new file mode 100644 index 00000000..908dda32 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/__init__.py @@ -0,0 +1,390 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk + from .deltamessage import ( + DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, + DeltaMessageTypedDict, + ) + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict + from .filechunk import FileChunk, FileChunkTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + 
Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .imagedetail import ImageDetail + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import ( + Document, + DocumentTypedDict, + OCRRequest, + OCRRequestTypedDict, + TableFormat, + ) + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .security import Security, SecurityTypedDict + from .systemmessage import ( + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + 
ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "ContentChunk", + "ContentChunkTypedDict", + "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", + "DeltaMessageTypedDict", + "Document", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkTypedDict", + "FileChunk", + "FileChunkTypedDict", + "Format", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "ImageDetail", + 
"ImageURL", + "ImageURLChunk", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "Loc", + "LocTypedDict", + "MistralPromptMode", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", + "Prediction", + "PredictionTypedDict", + "ReferenceChunk", + "ReferenceChunkTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Security", + "SecurityTypedDict", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TableFormat", + "TextChunk", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "UnknownContentChunk", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + 
"ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", + "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "DocumentURLChunk": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "FileChunk": ".filechunk", + 
"FileChunkTypedDict": ".filechunk", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "ImageDetail": ".imagedetail", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "Document": ".ocrrequest", + "DocumentTypedDict": ".ocrrequest", + "OCRRequest": ".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "Security": ".security", + "SecurityTypedDict": ".security", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": 
".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/models/assistantmessage.py b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py new file mode 100644 index 00000000..e9ae6e82 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .contentchunk import ContentChunk, ContentChunkTypedDict
from .toolcall import ToolCall, ToolCallTypedDict
from mistralai.azure.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai.azure.client.utils import validate_const
import pydantic
from pydantic import model_serializer
from pydantic.functional_validators import AfterValidator
from typing import List, Literal, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


# Assistant message content is either a plain string or a list of typed
# content chunks (text, image, reference, ...).
AssistantMessageContentTypedDict = TypeAliasType(
    "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
)


AssistantMessageContent = TypeAliasType(
    "AssistantMessageContent", Union[str, List[ContentChunk]]
)


# Plain-dict mirror of AssistantMessage, for callers that pass dicts
# instead of model instances.
class AssistantMessageTypedDict(TypedDict):
    role: Literal["assistant"]
    content: NotRequired[Nullable[AssistantMessageContentTypedDict]]
    tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
    prefix: NotRequired[bool]
    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""


class AssistantMessage(BaseModel):
    # Constant discriminator: validate_const rejects any value other than
    # "assistant", so this field also drives the role-based message union.
    role: Annotated[
        Annotated[
            Optional[Literal["assistant"]], AfterValidator(validate_const("assistant"))
        ],
        pydantic.Field(alias="role"),
    ] = "assistant"

    # UNSET means "field never provided"; None means "explicitly null".
    content: OptionalNullable[AssistantMessageContent] = UNSET

    tool_calls: OptionalNullable[List[ToolCall]] = UNSET

    prefix: Optional[bool] = False
    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize with tri-state field semantics.

        Drops fields still at the UNSET sentinel, drops optional fields that
        are None, but keeps an explicit None for nullable fields the caller
        actually set (so `content: null` can be sent on the wire).
        """
        optional_fields = set(["role", "content", "tool_calls", "prefix"])
        nullable_fields = set(["content", "tool_calls"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Prefer the wire alias over the Python attribute name.
            k = f.alias or n
            val = serialized.get(k)
            # Truthy (non-empty) intersection <=> the caller explicitly set
            # this nullable field, even if its value is None.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references now if possible; NameError means a referenced
# model is not imported yet and pydantic will rebuild lazily later.
try:
    AssistantMessage.model_rebuild()
except NameError:
    pass
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from mistralai.azure.client.types import BaseModel, UnrecognizedStr
from typing import Literal, Union
from typing_extensions import TypedDict


# Why generation stopped for a choice. UnrecognizedStr keeps deserialization
# forward-compatible when the API returns a value this client does not know.
ChatCompletionChoiceFinishReason = Union[
    Literal[
        "stop",
        "length",
        "model_length",
        "error",
        "tool_calls",
    ],
    UnrecognizedStr,
]


# Plain-dict mirror of ChatCompletionChoice.
class ChatCompletionChoiceTypedDict(TypedDict):
    index: int
    message: AssistantMessageTypedDict
    finish_reason: ChatCompletionChoiceFinishReason


class ChatCompletionChoice(BaseModel):
    # Position of this choice within the response's `choices` list.
    index: int

    # The assistant message generated for this choice.
    message: AssistantMessage

    # Reason generation ended (e.g. "stop", "length", "tool_calls").
    finish_reason: ChatCompletionChoiceFinishReason
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from .mistralpromptmode import MistralPromptMode
from .prediction import Prediction, PredictionTypedDict
from .responseformat import ResponseFormat, ResponseFormatTypedDict
from .systemmessage import SystemMessage, SystemMessageTypedDict
from .tool import Tool, ToolTypedDict
from .toolchoice import ToolChoice, ToolChoiceTypedDict
from .toolchoiceenum import ToolChoiceEnum
from .toolmessage import ToolMessage, ToolMessageTypedDict
from .usermessage import UserMessage, UserMessageTypedDict
from mistralai.azure.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai.azure.client.utils import get_discriminator
from pydantic import Discriminator, Tag, model_serializer
from typing import Any, Dict, List, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


ChatCompletionRequestStopTypedDict = TypeAliasType(
    "ChatCompletionRequestStopTypedDict", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


ChatCompletionRequestStop = TypeAliasType(
    "ChatCompletionRequestStop", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


ChatCompletionRequestMessageTypedDict = TypeAliasType(
    "ChatCompletionRequestMessageTypedDict",
    Union[
        SystemMessageTypedDict,
        UserMessageTypedDict,
        AssistantMessageTypedDict,
        ToolMessageTypedDict,
    ],
)


# Discriminated union: the "role" field of the payload picks the concrete
# message model, via the shared get_discriminator helper.
ChatCompletionRequestMessage = Annotated[
    Union[
        Annotated[AssistantMessage, Tag("assistant")],
        Annotated[SystemMessage, Tag("system")],
        Annotated[ToolMessage, Tag("tool")],
        Annotated[UserMessage, Tag("user")],
    ],
    Discriminator(lambda m: get_discriminator(m, "role", "role")),
]


ChatCompletionRequestToolChoiceTypedDict = TypeAliasType(
    "ChatCompletionRequestToolChoiceTypedDict",
    Union[ToolChoiceTypedDict, ToolChoiceEnum],
)
r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


ChatCompletionRequestToolChoice = TypeAliasType(
    "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
)
r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


# Plain-dict mirror of ChatCompletionRequest.
class ChatCompletionRequestTypedDict(TypedDict):
    messages: List[ChatCompletionRequestMessageTypedDict]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
    model: NotRequired[str]
    r"""The ID of the model to use for this request."""
    temperature: NotRequired[Nullable[float]]
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
    top_p: NotRequired[float]
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
    max_tokens: NotRequired[Nullable[int]]
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
    stream: NotRequired[bool]
    r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
    stop: NotRequired[ChatCompletionRequestStopTypedDict]
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
    random_seed: NotRequired[Nullable[int]]
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
    metadata: NotRequired[Nullable[Dict[str, Any]]]
    response_format: NotRequired[ResponseFormatTypedDict]
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
    tools: NotRequired[Nullable[List[ToolTypedDict]]]
    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
    tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict]
    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
    presence_penalty: NotRequired[float]
    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
    frequency_penalty: NotRequired[float]
    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
    n: NotRequired[Nullable[int]]
    r"""Number of completions to return for each request, input tokens are only billed once."""
    prediction: NotRequired[PredictionTypedDict]
    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
    parallel_tool_calls: NotRequired[bool]
    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
    safe_prompt: NotRequired[bool]
    r"""Whether to inject a safety prompt before all conversations."""


# Request body for the (non-streaming) Azure chat-completions endpoint.
# UNSET defaults distinguish "never provided" from an explicit None.
class ChatCompletionRequest(BaseModel):
    messages: List[ChatCompletionRequestMessage]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""

    # Azure deployments route by resource, hence the fixed default model id.
    model: Optional[str] = "azureai"
    r"""The ID of the model to use for this request."""

    temperature: OptionalNullable[float] = UNSET
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

    top_p: Optional[float] = None
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

    max_tokens: OptionalNullable[int] = UNSET
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""

    stream: Optional[bool] = False
    r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""

    stop: Optional[ChatCompletionRequestStop] = None
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""

    random_seed: OptionalNullable[int] = UNSET
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

    metadata: OptionalNullable[Dict[str, Any]] = UNSET

    response_format: Optional[ResponseFormat] = None
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

    tools: OptionalNullable[List[Tool]] = UNSET
    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""

    tool_choice: Optional[ChatCompletionRequestToolChoice] = None
    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""

    presence_penalty: Optional[float] = None
    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""

    frequency_penalty: Optional[float] = None
    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""

    n: OptionalNullable[int] = UNSET
    r"""Number of completions to return for each request, input tokens are only billed once."""

    prediction: Optional[Prediction] = None
    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""

    parallel_tool_calls: Optional[bool] = None
    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

    safe_prompt: Optional[bool] = None
    r"""Whether to inject a safety prompt before all conversations."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize with tri-state field semantics.

        Drops fields still at the UNSET sentinel, drops optional fields that
        are None, but keeps an explicit None for nullable fields the caller
        actually set (so e.g. `tools: null` can be sent on the wire).
        """
        # Every field except the required `messages` may be omitted.
        optional_fields = set(
            [
                "model",
                "temperature",
                "top_p",
                "max_tokens",
                "stream",
                "stop",
                "random_seed",
                "metadata",
                "response_format",
                "tools",
                "tool_choice",
                "presence_penalty",
                "frequency_penalty",
                "n",
                "prediction",
                "parallel_tool_calls",
                "prompt_mode",
                "safe_prompt",
            ]
        )
        nullable_fields = set(
            [
                "temperature",
                "max_tokens",
                "random_seed",
                "metadata",
                "tools",
                "n",
                "prompt_mode",
            ]
        )
        serialized = handler(self)
        m = {}

        # NOTE(review): the loop variable `n` coincides with the model field
        # named `n`; harmless here (local scope) but easy to misread.
        for n, f in type(self).model_fields.items():
            # Prefer the wire alias over the Python attribute name.
            k = f.alias or n
            val = serialized.get(k)
            # Truthy (non-empty) intersection <=> caller explicitly set this
            # nullable field, even if its value is None.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
from .usageinfo import UsageInfo, UsageInfoTypedDict
from mistralai.azure.client.types import BaseModel
from typing import List
from typing_extensions import TypedDict


# Plain-dict mirror of ChatCompletionResponse.
class ChatCompletionResponseTypedDict(TypedDict):
    id: str
    object: str
    model: str
    usage: UsageInfoTypedDict
    created: int
    choices: List[ChatCompletionChoiceTypedDict]


# Full (non-streaming) chat-completion response; all fields are required.
class ChatCompletionResponse(BaseModel):
    # Unique identifier of the completion.
    id: str

    # Object type tag returned by the API (e.g. a "chat.completion"-style
    # discriminator -- exact value comes from the server, not validated here).
    object: str

    # Model that produced the completion.
    model: str

    # Token accounting for the request.
    usage: UsageInfo

    # Creation time as a Unix timestamp (presumably seconds -- TODO confirm).
    created: int

    # One entry per requested completion (`n` in the request).
    choices: List[ChatCompletionChoice]
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from .mistralpromptmode import MistralPromptMode
from .prediction import Prediction, PredictionTypedDict
from .responseformat import ResponseFormat, ResponseFormatTypedDict
from .systemmessage import SystemMessage, SystemMessageTypedDict
from .tool import Tool, ToolTypedDict
from .toolchoice import ToolChoice, ToolChoiceTypedDict
from .toolchoiceenum import ToolChoiceEnum
from .toolmessage import ToolMessage, ToolMessageTypedDict
from .usermessage import UserMessage, UserMessageTypedDict
from mistralai.azure.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai.azure.client.utils import get_discriminator
from pydantic import Discriminator, Tag, model_serializer
from typing import Any, Dict, List, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


ChatCompletionStreamRequestStopTypedDict = TypeAliasType(
    "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


ChatCompletionStreamRequestStop = TypeAliasType(
    "ChatCompletionStreamRequestStop", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


ChatCompletionStreamRequestMessageTypedDict = TypeAliasType(
    "ChatCompletionStreamRequestMessageTypedDict",
    Union[
        SystemMessageTypedDict,
        UserMessageTypedDict,
        AssistantMessageTypedDict,
        ToolMessageTypedDict,
    ],
)


# Discriminated union: the "role" field of the payload picks the concrete
# message model, via the shared get_discriminator helper.
ChatCompletionStreamRequestMessage = Annotated[
    Union[
        Annotated[AssistantMessage, Tag("assistant")],
        Annotated[SystemMessage, Tag("system")],
        Annotated[ToolMessage, Tag("tool")],
        Annotated[UserMessage, Tag("user")],
    ],
    Discriminator(lambda m: get_discriminator(m, "role", "role")),
]


ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
    "ChatCompletionStreamRequestToolChoiceTypedDict",
    Union[ToolChoiceTypedDict, ToolChoiceEnum],
)
r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


ChatCompletionStreamRequestToolChoice = TypeAliasType(
    "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
)
r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


# Plain-dict mirror of ChatCompletionStreamRequest.
class ChatCompletionStreamRequestTypedDict(TypedDict):
    messages: List[ChatCompletionStreamRequestMessageTypedDict]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
    model: NotRequired[str]
    r"""The ID of the model to use for this request."""
    temperature: NotRequired[Nullable[float]]
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
    top_p: NotRequired[float]
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
    max_tokens: NotRequired[Nullable[int]]
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
    stream: NotRequired[bool]
    stop: NotRequired[ChatCompletionStreamRequestStopTypedDict]
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
    random_seed: NotRequired[Nullable[int]]
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
    metadata: NotRequired[Nullable[Dict[str, Any]]]
    response_format: NotRequired[ResponseFormatTypedDict]
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
    tools: NotRequired[Nullable[List[ToolTypedDict]]]
    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
    tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
    presence_penalty: NotRequired[float]
    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
    frequency_penalty: NotRequired[float]
    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
    n: NotRequired[Nullable[int]]
    r"""Number of completions to return for each request, input tokens are only billed once."""
    prediction: NotRequired[PredictionTypedDict]
    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
    parallel_tool_calls: NotRequired[bool]
    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
    safe_prompt: NotRequired[bool]
    r"""Whether to inject a safety prompt before all conversations."""


# Streaming variant of ChatCompletionRequest: identical fields except
# `stream` defaults to True.
class ChatCompletionStreamRequest(BaseModel):
    messages: List[ChatCompletionStreamRequestMessage]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""

    # Azure deployments route by resource, hence the fixed default model id.
    model: Optional[str] = "azureai"
    r"""The ID of the model to use for this request."""

    temperature: OptionalNullable[float] = UNSET
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

    top_p: Optional[float] = None
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

    max_tokens: OptionalNullable[int] = UNSET
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""

    # Streaming endpoint, so partial progress is requested by default.
    stream: Optional[bool] = True

    stop: Optional[ChatCompletionStreamRequestStop] = None
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""

    random_seed: OptionalNullable[int] = UNSET
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

    metadata: OptionalNullable[Dict[str, Any]] = UNSET

    response_format: Optional[ResponseFormat] = None
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

    tools: OptionalNullable[List[Tool]] = UNSET
    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""

    tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""

    presence_penalty: Optional[float] = None
    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""

    frequency_penalty: Optional[float] = None
    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""

    n: OptionalNullable[int] = UNSET
    r"""Number of completions to return for each request, input tokens are only billed once."""

    prediction: Optional[Prediction] = None
    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""

    parallel_tool_calls: Optional[bool] = None
    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

    safe_prompt: Optional[bool] = None
    r"""Whether to inject a safety prompt before all conversations."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize with tri-state field semantics.

        Drops fields still at the UNSET sentinel, drops optional fields that
        are None, but keeps an explicit None for nullable fields the caller
        actually set (so e.g. `tools: null` can be sent on the wire).
        """
        # Every field except the required `messages` may be omitted.
        optional_fields = set(
            [
                "model",
                "temperature",
                "top_p",
                "max_tokens",
                "stream",
                "stop",
                "random_seed",
                "metadata",
                "response_format",
                "tools",
                "tool_choice",
                "presence_penalty",
                "frequency_penalty",
                "n",
                "prediction",
                "parallel_tool_calls",
                "prompt_mode",
                "safe_prompt",
            ]
        )
        nullable_fields = set(
            [
                "temperature",
                "max_tokens",
                "random_seed",
                "metadata",
                "tools",
                "n",
                "prompt_mode",
            ]
        )
        serialized = handler(self)
        m = {}

        # NOTE(review): the loop variable `n` coincides with the model field
        # named `n`; harmless here (local scope) but easy to misread.
        for n, f in type(self).model_fields.items():
            # Prefer the wire alias over the Python attribute name.
            k = f.alias or n
            val = serialized.get(k)
            # Truthy (non-empty) intersection <=> caller explicitly set this
            # nullable field, even if its value is None.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
class CompletionChunkTypedDict(TypedDict):
    id: str
    model: str
    choices: List[CompletionResponseStreamChoiceTypedDict]
    object: NotRequired[str]
    created: NotRequired[int]
    usage: NotRequired[UsageInfoTypedDict]


class CompletionChunk(BaseModel):
    """One streamed chunk of a chat completion response.

    ``usage`` is typically only present on the final chunk of a stream —
    presumably, given it is optional here; confirm against the API docs.
    """

    id: str

    model: str

    choices: List[CompletionResponseStreamChoice]

    object: Optional[str] = None

    created: Optional[int] = None

    usage: Optional[UsageInfo] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer: drop UNSET fields and None-valued optionals."""
        optional_fields = set(["object", "created", "usage"])
        serialized = handler(self)
        m = {}

        # Iterate declared fields so aliases are honored in the output keys.
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:  # UNSET means "never provided"
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
CompletionResponseStreamChoiceFinishReason = Union[
    Literal[
        "stop",
        "length",
        "error",
        "tool_calls",
    ],
    UnrecognizedStr,
]
# UnrecognizedStr keeps forward compatibility: unknown finish reasons from the
# server are preserved instead of failing validation.


class CompletionResponseStreamChoiceTypedDict(TypedDict):
    index: int
    delta: DeltaMessageTypedDict
    finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason]


class CompletionResponseStreamChoice(BaseModel):
    """A single choice inside a streamed completion chunk."""

    index: int

    delta: DeltaMessage

    finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer: emit every field except UNSET sentinels.

        All fields here are required, so no optional/None filtering is needed;
        explicit nulls (finish_reason) are kept.
        """
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                m[k] = val

        return m
ContentChunkTypedDict = TypeAliasType(
    "ContentChunkTypedDict",
    Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict],
)


class UnknownContentChunk(BaseModel):
    r"""A ContentChunk variant the SDK doesn't recognize. 
    Preserves the raw payload."""

    # Fixed discriminator so unknown payloads are identifiable downstream.
    type: Literal["UNKNOWN"] = "UNKNOWN"
    raw: Any
    is_unknown: Literal[True] = True

    model_config = ConfigDict(frozen=True)


# Maps the wire discriminator (the "type" key) to the concrete model class.
_CONTENT_CHUNK_VARIANTS: dict[str, Any] = {
    "image_url": ImageURLChunk,
    "text": TextChunk,
    "reference": ReferenceChunk,
}


# Open union: parse_open_union dispatches on "type" and — presumably — wraps
# unmatched payloads in UnknownContentChunk rather than raising; confirm
# against utils.unions.parse_open_union.
ContentChunk = Annotated[
    Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk],
    BeforeValidator(
        partial(
            parse_open_union,
            disc_key="type",
            variants=_CONTENT_CHUNK_VARIANTS,
            unknown_cls=UnknownContentChunk,
            union_name="ContentChunk",
        )
    ),
]
OptionalNullable[List[ToolCall]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py new file mode 100644 index 00000000..2dea8005 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
class DocumentURLChunkTypedDict(TypedDict):
    document_url: str
    type: Literal["document_url"]
    document_name: NotRequired[Nullable[str]]
    r"""The filename of the document"""


class DocumentURLChunk(BaseModel):
    """Content chunk referencing a document by URL."""

    document_url: str

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[
            Optional[Literal["document_url"]],
            AfterValidator(validate_const("document_url")),
        ],
        pydantic.Field(alias="type"),
    ] = "document_url"

    document_name: OptionalNullable[str] = UNSET
    r"""The filename of the document"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer.

        Drops UNSET sentinels and None-valued optional fields, but keeps an
        explicit None for a nullable field the caller set deliberately.
        """
        optional_fields = set(["type", "document_name"])
        nullable_fields = set(["document_name"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the field is nullable AND was explicitly assigned.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references lazily; NameError means a referenced type is not
# importable yet and rebuild will happen on first use.
try:
    DocumentURLChunk.model_rebuild()
except NameError:
    pass
class FileChunkTypedDict(TypedDict):
    file_id: str
    type: Literal["file"]


class FileChunk(BaseModel):
    """Content chunk referencing a previously uploaded file by its id."""

    file_id: str

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))],
        pydantic.Field(alias="type"),
    ] = "file"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize the chunk, omitting optional fields whose value is None."""
        optional_keys = {"type"}
        raw = handler(self)
        out = {}
        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = raw.get(key)
            if value == UNSET_SENTINEL:
                # Sentinel means the field was never provided — skip it.
                continue
            if value is None and key in optional_keys:
                continue
            out[key] = value
        return out


# Resolve forward references lazily; a NameError simply defers the rebuild.
try:
    FileChunk.model_rebuild()
except NameError:
    pass
class FunctionTypedDict(TypedDict):
    name: str
    parameters: Dict[str, Any]
    description: NotRequired[str]
    strict: NotRequired[bool]


class Function(BaseModel):
    """A tool function definition: a name plus its parameter mapping."""

    name: str

    parameters: Dict[str, Any]

    description: Optional[str] = None

    strict: Optional[bool] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize the function, dropping optional fields left as None."""
        optional_keys = {"description", "strict"}
        raw = handler(self)
        out = {}
        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = raw.get(key)
            if value == UNSET_SENTINEL:
                # Sentinel means the field was never provided — skip it.
                continue
            if value is None and key in optional_keys:
                continue
            out[key] = value
        return out
# Arguments may arrive either pre-parsed (dict) or as a raw JSON string.
ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str])


Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str])


class FunctionCallTypedDict(TypedDict):
    name: str
    arguments: ArgumentsTypedDict


class FunctionCall(BaseModel):
    """A model-emitted call to a named function with its arguments."""

    name: str

    arguments: Arguments
# Open enum: UnrecognizedStr preserves detail levels this SDK doesn't know yet.
ImageDetail = Union[
    Literal[
        "low",
        "auto",
        "high",
    ],
    UnrecognizedStr,
]


class ImageURLTypedDict(TypedDict):
    url: str
    detail: NotRequired[Nullable[ImageDetail]]


class ImageURL(BaseModel):
    """An image reference (URL or data URI) with an optional detail level."""

    url: str

    detail: OptionalNullable[ImageDetail] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer.

        Drops UNSET sentinels and None-valued optional fields, but keeps an
        explicit None for a nullable field the caller set deliberately.
        """
        optional_fields = set(["detail"])
        nullable_fields = set(["detail"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the field is nullable AND was explicitly assigned.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
# The image may be given as a structured ImageURL object or a bare URL string.
ImageURLUnionTypedDict = TypeAliasType(
    "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str]
)


ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str])


class ImageURLChunkTypedDict(TypedDict):
    r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""

    image_url: ImageURLUnionTypedDict
    type: Literal["image_url"]


class ImageURLChunk(BaseModel):
    r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""

    image_url: ImageURLUnion

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[
            Optional[Literal["image_url"]], AfterValidator(validate_const("image_url"))
        ],
        pydantic.Field(alias="type"),
    ] = "image_url"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer: drop UNSET fields and None-valued optionals."""
        optional_fields = set(["type"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


# Resolve forward references lazily; a NameError simply defers the rebuild.
try:
    ImageURLChunk.model_rebuild()
except NameError:
    pass
class JSONSchemaTypedDict(TypedDict):
    name: str
    schema_definition: Dict[str, Any]
    description: NotRequired[Nullable[str]]
    strict: NotRequired[bool]


class JSONSchema(BaseModel):
    """A named JSON schema used to constrain structured output."""

    name: str

    # Python attribute is schema_definition; serializes as "schema" on the
    # wire ("schema" clashes with a BaseModel attribute in pydantic).
    schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]

    description: OptionalNullable[str] = UNSET

    strict: Optional[bool] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer.

        Drops UNSET sentinels and None-valued optional fields, but keeps an
        explicit None for a nullable field the caller set deliberately.
        """
        optional_fields = set(["description", "strict"])
        nullable_fields = set(["description"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the field is nullable AND was explicitly assigned.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references lazily; a NameError simply defers the rebuild.
try:
    JSONSchema.model_rebuild()
except NameError:
    pass
class OCRImageObjectTypedDict(TypedDict):
    id: str
    r"""Image ID for extracted image in a page"""
    top_left_x: Nullable[int]
    r"""X coordinate of top-left corner of the extracted image"""
    top_left_y: Nullable[int]
    r"""Y coordinate of top-left corner of the extracted image"""
    bottom_right_x: Nullable[int]
    r"""X coordinate of bottom-right corner of the extracted image"""
    bottom_right_y: Nullable[int]
    r"""Y coordinate of bottom-right corner of the extracted image"""
    image_base64: NotRequired[Nullable[str]]
    r"""Base64 string of the extracted image"""
    image_annotation: NotRequired[Nullable[str]]
    r"""Annotation of the extracted image in json str"""


class OCRImageObject(BaseModel):
    """An image extracted from a page by OCR, with its bounding box."""

    id: str
    r"""Image ID for extracted image in a page"""

    top_left_x: Nullable[int]
    r"""X coordinate of top-left corner of the extracted image"""

    top_left_y: Nullable[int]
    r"""Y coordinate of top-left corner of the extracted image"""

    bottom_right_x: Nullable[int]
    r"""X coordinate of bottom-right corner of the extracted image"""

    bottom_right_y: Nullable[int]
    r"""Y coordinate of bottom-right corner of the extracted image"""

    image_base64: OptionalNullable[str] = UNSET
    r"""Base64 string of the extracted image"""

    image_annotation: OptionalNullable[str] = UNSET
    r"""Annotation of the extracted image in json str"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer.

        Drops UNSET sentinels and None-valued optional fields, but keeps an
        explicit None for a nullable field the caller set deliberately.
        Required nullable fields (the bounding-box coordinates) always emit.
        """
        optional_fields = set(["image_base64", "image_annotation"])
        nullable_fields = set(
            [
                "top_left_x",
                "top_left_y",
                "bottom_right_x",
                "bottom_right_y",
                "image_base64",
                "image_annotation",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the field is nullable AND was explicitly assigned.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
class OCRPageDimensionsTypedDict(TypedDict):
    dpi: int
    r"""Dots per inch of the page-image"""
    height: int
    r"""Height of the image in pixels"""
    width: int
    r"""Width of the image in pixels"""


class OCRPageDimensions(BaseModel):
    """Pixel dimensions and resolution of a rendered PDF page image."""

    dpi: int
    r"""Dots per inch of the page-image"""

    height: int
    r"""Height of the image in pixels"""

    width: int
    r"""Width of the image in pixels"""
class OCRPageObject(BaseModel):
    """OCR result for one page: markdown text plus extracted artifacts."""

    index: int
    r"""The page index in a pdf document starting from 0"""

    markdown: str
    r"""The markdown string response of the page"""

    images: List[OCRImageObject]
    r"""List of all extracted images in the page"""

    dimensions: Nullable[OCRPageDimensions]
    r"""The dimensions of the PDF Page's screenshot image"""

    tables: Optional[List[OCRTableObject]] = None
    r"""List of all extracted tables in the page"""

    hyperlinks: Optional[List[str]] = None
    r"""List of all hyperlinks in the page"""

    header: OptionalNullable[str] = UNSET
    r"""Header of the page"""

    footer: OptionalNullable[str] = UNSET
    r"""Footer of the page"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer.

        Drops UNSET sentinels and None-valued optional fields, but keeps an
        explicit None for a nullable field the caller set deliberately.
        NOTE(review): "dimensions" appears in nullable_fields but is required
        (not in optional_fields), so its entry looks redundant here — confirm
        against the generator template before relying on it.
        """
        optional_fields = set(["tables", "hyperlinks", "header", "footer"])
        nullable_fields = set(["header", "footer", "dimensions"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the field is nullable AND was explicitly assigned.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
# Input document for OCR: an uploaded file, an image URL, or a document URL.
DocumentTypedDict = TypeAliasType(
    "DocumentTypedDict",
    Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict],
)
r"""Document to run OCR on"""


Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk])
r"""Document to run OCR on"""


# Output format for extracted tables.
TableFormat = Literal[
    "markdown",
    "html",
]
class OCRRequest(BaseModel):
    """Request body for the OCR endpoint."""

    model: Nullable[str]

    document: Document
    r"""Document to run OCR on"""

    id: Optional[str] = None

    pages: OptionalNullable[List[int]] = UNSET
    r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0"""

    include_image_base64: OptionalNullable[bool] = UNSET
    r"""Include image URLs in response"""

    image_limit: OptionalNullable[int] = UNSET
    r"""Max images to extract"""

    image_min_size: OptionalNullable[int] = UNSET
    r"""Minimum height and width of image to extract"""

    bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET
    r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field"""

    document_annotation_format: OptionalNullable[ResponseFormat] = UNSET
    r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field"""

    document_annotation_prompt: OptionalNullable[str] = UNSET
    r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided."""

    table_format: OptionalNullable[TableFormat] = UNSET

    extract_header: Optional[bool] = None

    extract_footer: Optional[bool] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Custom wrap serializer.

        Drops UNSET sentinels and None-valued optional fields, but keeps an
        explicit None for a nullable field the caller set deliberately
        ("model" is required-nullable and always emits).
        """
        optional_fields = set(
            [
                "id",
                "pages",
                "include_image_base64",
                "image_limit",
                "image_min_size",
                "bbox_annotation_format",
                "document_annotation_format",
                "document_annotation_prompt",
                "table_format",
                "extract_header",
                "extract_footer",
            ]
        )
        nullable_fields = set(
            [
                "model",
                "pages",
                "include_image_base64",
                "image_limit",
                "image_min_size",
                "bbox_annotation_format",
                "document_annotation_format",
                "document_annotation_prompt",
                "table_format",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the field is nullable AND was explicitly assigned.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py new file mode 100644 index 00000000..f1de5428 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py @@ -0,0 +1,43 @@ +"""Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UnrecognizedStr +import pydantic +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict + + +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, +] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py new file mode 100644 index 00000000..f63315d2 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/prediction.py b/packages/azure/src/mistralai/azure/client/models/prediction.py new file mode 100644 index 00000000..1fa1d782 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/prediction.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/referencechunk.py b/packages/azure/src/mistralai/azure/client/models/referencechunk.py new file mode 100644 index 00000000..f7af9bf9 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/referencechunk.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/responseformat.py b/packages/azure/src/mistralai/azure/client/models/responseformat.py new file mode 100644 index 00000000..20fd2b86 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/responseformat.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
class ResponseFormatTypedDict(TypedDict):
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

    type: NotRequired[ResponseFormats]
    json_schema: NotRequired[Nullable[JSONSchemaTypedDict]]


class ResponseFormat(BaseModel):
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

    type: Optional[ResponseFormats] = None

    json_schema: OptionalNullable[JSONSchema] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize via pydantic; json_schema is omitted when unset and
        serialized as null only when explicitly assigned."""
        optional_fields = set(["type", "json_schema"])
        nullable_fields = set(["json_schema"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire key
            val = serialized.get(k)
            # explicit-assignment check: "set to null" vs "left unset"
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Closed set of output modes accepted by the API for ResponseFormat.type.
ResponseFormats = Literal[
    "text",
    "json_object",
    "json_schema",
]
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import FieldMetadata, SecurityMetadata +from typing_extensions import Annotated, TypedDict + + +class SecurityTypedDict(TypedDict): + api_key: str + + +class Security(BaseModel): + api_key: Annotated[ + str, + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessage.py b/packages/azure/src/mistralai/azure/client/models/systemmessage.py new file mode 100644 index 00000000..d4bd0044 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/systemmessage.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], +) + + +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] +) + + +class SystemMessageTypedDict(TypedDict): + content: SystemMessageContentTypedDict + role: Literal["system"] + + +class SystemMessage(BaseModel): + content: SystemMessageContent + + role: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" + + +try: + SystemMessage.model_rebuild() +except 
NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..8de71c90 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from pydantic import Field +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[TextChunk, ThinkChunk], Field(discriminator="type") +] diff --git a/packages/azure/src/mistralai/azure/client/models/textchunk.py b/packages/azure/src/mistralai/azure/client/models/textchunk.py new file mode 100644 index 00000000..92951485 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/textchunk.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + type: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/thinkchunk.py b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py new file mode 100644 index 00000000..4e881aad --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/tool.py b/packages/azure/src/mistralai/azure/client/models/tool.py new file mode 100644 index 00000000..87329bdb --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/tool.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from .tooltypes import ToolTypes +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[ToolTypes] + + +class Tool(BaseModel): + function: Function + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolcall.py b/packages/azure/src/mistralai/azure/client/models/toolcall.py new file mode 100644 index 00000000..ada1ea65 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/toolcall.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from .tooltypes import ToolTypes +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + type: NotRequired[ToolTypes] + index: NotRequired[int] + + +class ToolCall(BaseModel): + function: FunctionCall + + id: Optional[str] = "null" + + type: Optional[ToolTypes] = None + + index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolchoice.py b/packages/azure/src/mistralai/azure/client/models/toolchoice.py new file mode 100644 index 00000000..ddb9e141 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/toolchoice.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py b/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py new file mode 100644 index 00000000..01f6f677 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
# Closed set of tool-selection strategies accepted in place of a ToolChoice object.
ToolChoiceEnum = Literal[
    "auto",
    "none",
    "any",
    "required",
]


# Tool message content is either a plain string or a list of typed chunks.
ToolMessageContentTypedDict = TypeAliasType(
    "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
)


ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]])


class ToolMessageTypedDict(TypedDict):
    content: Nullable[ToolMessageContentTypedDict]
    role: Literal["tool"]
    tool_call_id: NotRequired[Nullable[str]]
    name: NotRequired[Nullable[str]]


class ToolMessage(BaseModel):
    r"""A chat message with the fixed role \"tool\", carrying a tool call's result."""

    content: Nullable[ToolMessageContent]

    # validate_const pins the role; the default lets callers omit it.
    role: Annotated[
        Annotated[Literal["tool"], AfterValidator(validate_const("tool"))],
        pydantic.Field(alias="role"),
    ] = "tool"

    tool_call_id: OptionalNullable[str] = UNSET

    name: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize via pydantic; tool_call_id/name are omitted when unset and
        serialized as null only when explicitly assigned."""
        optional_fields = set(["tool_call_id", "name"])
        nullable_fields = set(["content", "tool_call_id", "name"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire key
            val = serialized.get(k)
            # explicit-assignment check: "set to null" vs "left unset"
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references eagerly; ignore NameError if dependencies are
# not yet importable at module-import time.
try:
    ToolMessage.model_rebuild()
except NameError:
    pass


# UnrecognizedStr keeps deserialization forward-compatible with tool types the
# server may add later.
ToolTypes = Union[Literal["function",], UnrecognizedStr]
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] + + +class UsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/usermessage.py 
b/packages/azure/src/mistralai/azure/client/models/usermessage.py new file mode 100644 index 00000000..549b01ca --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/usermessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.azure.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) + + +class UserMessageTypedDict(TypedDict): + content: Nullable[UserMessageContentTypedDict] + role: Literal["user"] + + +class UserMessage(BaseModel): + content: Nullable[UserMessageContent] + + role: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/validationerror.py b/packages/azure/src/mistralai/azure/client/models/validationerror.py new file mode 100644 index 00000000..817ecf7a --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/validationerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy 
# A validation-error location element is either a field name or a list index.
LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int])


Loc = TypeAliasType("Loc", Union[str, int])


class ValidationErrorTypedDict(TypedDict):
    loc: List[LocTypedDict]
    msg: str
    type: str


class ValidationError(BaseModel):
    r"""A single validation error as returned in an HTTP 422 response body."""

    loc: List[Loc]

    msg: str

    type: str


class Ocr(BaseSDK):
    r"""SDK surface for the OCR endpoint."""

    def process(
        self,
        *,
        model: Nullable[str],
        document: Union[models.Document, models.DocumentTypedDict],
        id: Optional[str] = None,
        pages: OptionalNullable[List[int]] = UNSET,
        include_image_base64: OptionalNullable[bool] = UNSET,
        image_limit: OptionalNullable[int] = UNSET,
        image_min_size: OptionalNullable[int] = UNSET,
        bbox_annotation_format: OptionalNullable[
            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
        ] = UNSET,
        document_annotation_format: OptionalNullable[
            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
        ] = UNSET,
        document_annotation_prompt: OptionalNullable[str] = UNSET,
        table_format: OptionalNullable[models.TableFormat] = UNSET,
        extract_header: Optional[bool] = None,
        extract_footer: Optional[bool] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.OCRResponse:
        r"""OCR

        :param model:
        :param document: Document to run OCR on
        :param id:
        :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0
        :param include_image_base64: Include image URLs in response
        :param image_limit: Max images to extract
        :param image_min_size: Minimum height and width of image to extract
        :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field
        :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field
        :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.
        :param table_format:
        :param extract_header:
        :param extract_footer:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when none is given per call.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # Per-call server override takes precedence over the configured URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Coerce TypedDict inputs into their pydantic model equivalents so the
        # request body serializes uniformly.
        request = models.OCRRequest(
            model=model,
            id=id,
            document=utils.get_pydantic_model(document, models.Document),
            pages=pages,
            include_image_base64=include_image_base64,
            image_limit=image_limit,
            image_min_size=image_min_size,
            bbox_annotation_format=utils.get_pydantic_model(
                bbox_annotation_format, OptionalNullable[models.ResponseFormat]
            ),
            document_annotation_format=utils.get_pydantic_model(
                document_annotation_format, OptionalNullable[models.ResponseFormat]
            ),
            document_annotation_prompt=document_annotation_prompt,
            table_format=table_format,
            extract_header=extract_header,
            extract_footer=extract_footer,
        )

        req = self._build_request(
            method="POST",
            path="/ocr",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazily serialize the body only when the transport needs it.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.OCRRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="ocr_v1_ocr_post",
                oauth2_scopes=None,
                security_source=self.sdk_configuration.security,
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status code: 200 -> typed response; 422 -> structured
        # validation error; other 4XX/5XX -> generic SDKError with body text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.OCRResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        # Anything else (e.g. unexpected content type) is a hard failure.
        raise errors.SDKError("Unexpected response received", http_res)
list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request_async( + method="POST", + path="/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", 
"5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/azure/src/mistralai/azure/client/py.typed b/packages/azure/src/mistralai/azure/client/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/azure/src/mistralai/azure/client/sdk.py b/packages/azure/src/mistralai/azure/client/sdk.py new file mode 100644 index 00000000..985cb9a8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/sdk.py @@ -0,0 +1,217 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
# --- file: packages/azure/src/mistralai/azure/client/sdk.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients
from .sdkconfiguration import SDKConfiguration
from .utils.logger import Logger, get_default_logger
from .utils.retries import RetryConfig
import httpx
import importlib
import logging
from mistralai.azure.client import models, utils
from mistralai.azure.client._hooks import SDKHooks
from mistralai.azure.client.types import OptionalNullable, UNSET
import sys
from typing import Callable, Dict, Optional, TYPE_CHECKING, Union, cast
import warnings
import weakref

logger = logging.getLogger(__name__)

# Sub-SDK classes are imported lazily at attribute access time (see
# __getattr__); the TYPE_CHECKING imports exist only for annotations.
if TYPE_CHECKING:
    from mistralai.azure.client.chat import Chat
    from mistralai.azure.client.ocr import Ocr


class MistralAzure(BaseSDK):
    r"""Mistral AI API: Dora OpenAPI schema

    Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.
    """

    chat: "Chat"
    r"""Chat Completion API."""
    ocr: "Ocr"
    # Attribute name -> (module path, class name) used by the lazy loader.
    _sub_sdk_map = {
        "chat": ("mistralai.azure.client.chat", "Chat"),
        "ocr": ("mistralai.azure.client.ocr", "Ocr"),
    }

    def __init__(
        self,
        api_key: Union[str, Callable[[], str]],
        server: Optional[str] = None,
        server_url: Optional[str] = None,
        url_params: Optional[Dict[str, str]] = None,
        client: Optional[HttpClient] = None,
        async_client: Optional[AsyncHttpClient] = None,
        retry_config: OptionalNullable[RetryConfig] = UNSET,
        timeout_ms: Optional[int] = None,
        debug_logger: Optional[Logger] = None,
        api_version: str = "2024-05-01-preview",
    ) -> None:
        r"""Instantiates the SDK configuring it with the provided parameters.

        :param api_key: The api_key required for authentication
        :param server: The server by name to use for all methods
        :param server_url: The server URL to use for all methods
        :param url_params: Parameters to optionally template the server URL with
        :param client: The HTTP client to use for all synchronous methods
        :param async_client: The Async HTTP client to use for all asynchronous methods
        :param retry_config: The retry configuration to use for all supported methods
        :param timeout_ms: Optional request timeout applied to each operation in milliseconds
        :param api_version: Azure API version to use (injected as query param)
        """
        # Track whether the caller supplied clients; SDK-owned clients are
        # closed by the weakref finalizer / context-manager exits below.
        client_supplied = True
        if client is None:
            client = httpx.Client(
                follow_redirects=True,
                params={"api-version": api_version},
            )
            client_supplied = False
        elif api_version != "2024-05-01-preview":
            # api_version is only wired into clients the SDK itself creates.
            warnings.warn(
                "api_version is ignored when a custom client is provided. "
                "Set the api-version query parameter on your httpx.Client directly.",
                stacklevel=2,
            )

        assert issubclass(
            type(client), HttpClient
        ), "The provided client must implement the HttpClient protocol."

        async_client_supplied = True
        if async_client is None:
            async_client = httpx.AsyncClient(
                follow_redirects=True,
                params={"api-version": api_version},
            )
            async_client_supplied = False
        elif api_version != "2024-05-01-preview":
            warnings.warn(
                "api_version is ignored when a custom async_client is provided. "
                "Set the api-version query parameter on your httpx.AsyncClient directly.",
                stacklevel=2,
            )

        if debug_logger is None:
            debug_logger = get_default_logger()

        assert issubclass(
            type(async_client), AsyncHttpClient
        ), "The provided async_client must implement the AsyncHttpClient protocol."

        # Wrap a callable api_key so the security model is re-evaluated on
        # each request; a plain string becomes a static Security model.
        security: Union[models.Security, Callable[[], models.Security]]
        if callable(api_key):

            def get_security() -> models.Security:
                return models.Security(api_key=api_key())

            security = get_security
        else:
            security = models.Security(api_key=api_key)

        if server_url is not None:
            if url_params is not None:
                server_url = utils.template_url(server_url, url_params)

        BaseSDK.__init__(
            self,
            SDKConfiguration(
                client=client,
                client_supplied=client_supplied,
                async_client=async_client,
                async_client_supplied=async_client_supplied,
                security=security,
                server_url=server_url,
                server=server,
                retry_config=retry_config,
                timeout_ms=timeout_ms,
                debug_logger=debug_logger,
            ),
            parent_ref=self,
        )

        hooks = SDKHooks()
        self.sdk_configuration.__dict__["_hooks"] = hooks

        # Let SDK-init hooks rewrite the server URL and/or sync client.
        # NOTE(review): the local `client` is passed here, not
        # self.sdk_configuration.client — presumably identical at this point;
        # confirm against SDKHooks.sdk_init's contract.
        current_server_url, *_ = self.sdk_configuration.get_server_details()
        server_url, self.sdk_configuration.client = hooks.sdk_init(
            current_server_url, client
        )
        if current_server_url != server_url:
            self.sdk_configuration.server_url = server_url

        # Ensure SDK-owned clients get closed when this instance is collected.
        weakref.finalize(
            self,
            close_clients,
            cast(ClientOwner, self.sdk_configuration),
            self.sdk_configuration.client,
            self.sdk_configuration.client_supplied,
            self.sdk_configuration.async_client,
            self.sdk_configuration.async_client_supplied,
        )

    def dynamic_import(self, modname, retries=3):
        # Import a module, retrying after clearing any half-initialized entry
        # from sys.modules (guards against concurrent-import races).
        last_exc: Optional[Exception] = None
        for attempt in range(retries):
            try:
                return importlib.import_module(modname)
            except (KeyError, ImportError, ModuleNotFoundError) as e:
                last_exc = e
                # Clear any half-initialized module and retry
                sys.modules.pop(modname, None)
                if attempt == retries - 1:
                    break
        raise ImportError(
            f"Failed to import module '{modname}' after {retries} attempts"
        ) from last_exc

    def __getattr__(self, name: str):
        # Lazy sub-SDK loader: only reached when `name` is not already an
        # instance attribute. Caches the instance via setattr so subsequent
        # accesses bypass __getattr__.
        if name in self._sub_sdk_map:
            module_path, class_name = self._sub_sdk_map[name]
            try:
                module = self.dynamic_import(module_path)
                klass = getattr(module, class_name)
                instance = klass(self.sdk_configuration, parent_ref=self)
                setattr(self, name, instance)
                return instance
            except ImportError as e:
                raise AttributeError(
                    f"Failed to import module {module_path} for attribute {name}: {e}"
                ) from e
            except AttributeError as e:
                raise AttributeError(
                    f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}"
                ) from e

        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{name}'"
        )

    def __dir__(self):
        # Advertise the lazily-loaded sub-SDKs alongside regular attributes.
        default_attrs = list(super().__dir__())
        lazy_attrs = list(self._sub_sdk_map.keys())
        return sorted(list(set(default_attrs + lazy_attrs)))

    def __enter__(self):
        return self

    async def __aenter__(self):
        return self

    def __exit__(self, _exc_type, _exc_val, _exc_tb):
        # Close only clients the SDK created itself; caller-supplied clients
        # remain the caller's responsibility.
        if (
            self.sdk_configuration.client is not None
            and not self.sdk_configuration.client_supplied
        ):
            self.sdk_configuration.client.close()
        self.sdk_configuration.client = None

    async def __aexit__(self, _exc_type, _exc_val, _exc_tb):
        if (
            self.sdk_configuration.async_client is not None
            and not self.sdk_configuration.async_client_supplied
        ):
            await self.sdk_configuration.async_client.aclose()
        self.sdk_configuration.async_client = None


# --- file: packages/azure/src/mistralai/azure/client/sdkconfiguration.py (continues on following lines) ---
DO NOT EDIT.""" + +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai.azure.client import models +from mistralai.azure.client.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_EU = "eu" +r"""EU Production server""" +SERVERS = { + SERVER_EU: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool + debug_logger: Logger + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_EU + + if self.server not in SERVERS: + raise ValueError(f'Invalid server "{self.server}"') + + return SERVERS[self.server], {} diff --git a/packages/azure/src/mistralai/azure/client/types/__init__.py b/packages/azure/src/mistralai/azure/client/types/__init__.py new file mode 100644 index 00000000..fc76fe0c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy 
# --- file: packages/azure/src/mistralai/azure/client/types/__init__.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basemodel import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UnrecognizedInt,
    UnrecognizedStr,
    UNSET,
    UNSET_SENTINEL,
)

__all__ = [
    "BaseModel",
    "Nullable",
    "OptionalNullable",
    "UnrecognizedInt",
    "UnrecognizedStr",
    "UNSET",
    "UNSET_SENTINEL",
]


# --- file: packages/azure/src/mistralai/azure/client/types/basemodel.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from pydantic import ConfigDict, model_serializer
from pydantic import BaseModel as PydanticBaseModel
from pydantic_core import core_schema
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union
from typing_extensions import TypeAliasType, TypeAlias


class BaseModel(PydanticBaseModel):
    # Common base for all generated models: accepts field aliases and
    # arbitrary types, and disables pydantic's protected namespaces.
    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=()
    )


class Unset(BaseModel):
    # Singleton sentinel distinguishing "not provided" from None; serializes
    # to UNSET_SENTINEL so serializers can strip unset fields, and is falsy.
    @model_serializer(mode="plain")
    def serialize_model(self):
        return UNSET_SENTINEL

    def __bool__(self) -> Literal[False]:
        return False


UNSET = Unset()
UNSET_SENTINEL = "~?~unset~?~sentinel~?~"


T = TypeVar("T")
# Static checkers see plain Union aliases; at runtime, TypeAliasType keeps
# the aliases generic and introspectable.
if TYPE_CHECKING:
    Nullable: TypeAlias = Union[T, None]
    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
else:
    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
    OptionalNullable = TypeAliasType(
        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
    )


class UnrecognizedStr(str):
    @classmethod
    def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema:
        # Make UnrecognizedStr only work in lax mode, not strict mode
        # This makes it a "fallback" option when more specific types (like Literals) don't match
        def validate_lax(v: Any) -> 'UnrecognizedStr':
            if isinstance(v, cls):
                return v
            return cls(str(v))

        # Use lax_or_strict_schema where strict always fails
        # This forces Pydantic to prefer other union members in strict mode
        # and only fall back to UnrecognizedStr in lax mode
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema([
                core_schema.str_schema(),
                core_schema.no_info_plain_validator_function(validate_lax)
            ]),
            strict_schema=core_schema.none_schema(),  # Always fails in strict mode
        )


class UnrecognizedInt(int):
    @classmethod
    def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema:
        # Make UnrecognizedInt only work in lax mode, not strict mode
        # This makes it a "fallback" option when more specific types (like Literals) don't match
        def validate_lax(v: Any) -> 'UnrecognizedInt':
            if isinstance(v, cls):
                return v
            return cls(int(v))
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema([
                core_schema.int_schema(),
                core_schema.no_info_plain_validator_function(validate_lax)
            ]),
            strict_schema=core_schema.none_schema(),  # Always fails in strict mode
        )


# --- file: packages/azure/src/mistralai/azure/client/utils/__init__.py (continues on following lines) ---
# --- file: packages/azure/src/mistralai/azure/client/utils/__init__.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import Any, TYPE_CHECKING, Callable, TypeVar
import asyncio

from .dynamic_imports import lazy_getattr, lazy_dir

_T = TypeVar("_T")


async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T:
    """Run a synchronous function in a thread pool to avoid blocking the event loop."""
    return await asyncio.to_thread(func, *args)


# The imports below exist only for type checkers; at runtime every name is
# resolved lazily through the module-level __getattr__ and _dynamic_imports.
if TYPE_CHECKING:
    from .annotations import get_discriminator
    from .datetimes import parse_datetime
    from .enums import OpenEnumMeta
    from .unions import parse_open_union
    from .headers import get_headers, get_response_headers
    from .metadata import (
        FieldMetadata,
        find_metadata,
        FormMetadata,
        HeaderMetadata,
        MultipartFormMetadata,
        PathParamMetadata,
        QueryParamMetadata,
        RequestMetadata,
        SecurityMetadata,
    )
    from .queryparams import get_query_params
    from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig
    from .requestbodies import serialize_request_body, SerializedRequestBody
    from .security import get_security
    from .serializers import (
        get_pydantic_model,
        marshal_json,
        unmarshal,
        unmarshal_json,
        serialize_decimal,
        serialize_float,
        serialize_int,
        stream_to_text,
        stream_to_text_async,
        stream_to_bytes,
        stream_to_bytes_async,
        validate_const,
        validate_decimal,
        validate_float,
        validate_int,
    )
    from .url import generate_url, template_url, remove_suffix
    from .values import (
        get_global_from_env,
        match_content_type,
        match_status_codes,
        match_response,
        cast_partial,
    )
    from .logger import Logger, get_body_content, get_default_logger

__all__ = [
    "BackoffStrategy",
    "FieldMetadata",
    "find_metadata",
    "FormMetadata",
    "generate_url",
    "get_body_content",
    "get_default_logger",
    "get_discriminator",
    "parse_datetime",
    "get_global_from_env",
    "get_headers",
    "get_pydantic_model",
    "get_query_params",
    "get_response_headers",
    "get_security",
    "HeaderMetadata",
    "Logger",
    "marshal_json",
    "match_content_type",
    "match_status_codes",
    "match_response",
    "MultipartFormMetadata",
    "OpenEnumMeta",
    "parse_open_union",
    "PathParamMetadata",
    "QueryParamMetadata",
    "remove_suffix",
    "Retries",
    "retry",
    "retry_async",
    "RetryConfig",
    "RequestMetadata",
    "SecurityMetadata",
    "serialize_decimal",
    "serialize_float",
    "serialize_int",
    "serialize_request_body",
    "SerializedRequestBody",
    "stream_to_text",
    "stream_to_text_async",
    "stream_to_bytes",
    "stream_to_bytes_async",
    "template_url",
    "unmarshal",
    "unmarshal_json",
    "validate_decimal",
    "validate_const",
    "validate_float",
    "validate_int",
    "cast_partial",
]

# Attribute name -> relative submodule providing it (consumed by lazy_getattr).
_dynamic_imports: dict[str, str] = {
    "BackoffStrategy": ".retries",
    "FieldMetadata": ".metadata",
    "find_metadata": ".metadata",
    "FormMetadata": ".metadata",
    "generate_url": ".url",
    "get_body_content": ".logger",
    "get_default_logger": ".logger",
    "get_discriminator": ".annotations",
    "parse_datetime": ".datetimes",
    "get_global_from_env": ".values",
    "get_headers": ".headers",
    "get_pydantic_model": ".serializers",
    "get_query_params": ".queryparams",
    "get_response_headers": ".headers",
    "get_security": ".security",
    "HeaderMetadata": ".metadata",
    "Logger": ".logger",
    "marshal_json": ".serializers",
    "match_content_type": ".values",
    "match_status_codes": ".values",
    "match_response": ".values",
    "MultipartFormMetadata": ".metadata",
    "OpenEnumMeta": ".enums",
    "parse_open_union": ".unions",
    "PathParamMetadata": ".metadata",
    "QueryParamMetadata": ".metadata",
    "remove_suffix": ".url",
    "Retries": ".retries",
    "retry": ".retries",
    "retry_async": ".retries",
    "RetryConfig": ".retries",
    "RequestMetadata": ".metadata",
    "SecurityMetadata": ".metadata",
    "serialize_decimal": ".serializers",
    "serialize_float": ".serializers",
    "serialize_int": ".serializers",
    "serialize_request_body": ".requestbodies",
    "SerializedRequestBody": ".requestbodies",
    "stream_to_text": ".serializers",
    "stream_to_text_async": ".serializers",
    "stream_to_bytes": ".serializers",
    "stream_to_bytes_async": ".serializers",
    "template_url": ".url",
    "unmarshal": ".serializers",
    "unmarshal_json": ".serializers",
    "validate_decimal": ".serializers",
    "validate_const": ".serializers",
    "validate_float": ".serializers",
    "validate_int": ".serializers",
    "cast_partial": ".values",
}


def __getattr__(attr_name: str) -> Any:
    # PEP 562 module __getattr__: defer submodule imports until first use.
    return lazy_getattr(
        attr_name, package=__package__, dynamic_imports=_dynamic_imports
    )


def __dir__():
    return lazy_dir(dynamic_imports=_dynamic_imports)


# --- file: packages/azure/src/mistralai/azure/client/utils/annotations.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from enum import Enum
from typing import Any, Optional


def get_discriminator(model: Any, fieldname: str, key: str) -> str:
    """
    Recursively search for the discriminator attribute in a model.

    Args:
        model (Any): The model to search within.
        fieldname (str): The name of the field to search for.
        key (str): The key to search for in dictionaries.

    Returns:
        str: The name of the discriminator attribute.

    Raises:
        ValueError: If the discriminator attribute is not found.
    """
    upper_fieldname = fieldname.upper()

    def get_field_discriminator(field: Any) -> Optional[str]:
        """Search for the discriminator attribute in a given field."""

        # Dicts are checked by key; objects by attribute (exact name first,
        # then the upper-cased variant). Enum values are unwrapped to .value.
        if isinstance(field, dict):
            if key in field:
                return f"{field[key]}"

        if hasattr(field, fieldname):
            attr = getattr(field, fieldname)
            if isinstance(attr, Enum):
                return f"{attr.value}"
            return f"{attr}"

        if hasattr(field, upper_fieldname):
            attr = getattr(field, upper_fieldname)
            if isinstance(attr, Enum):
                return f"{attr.value}"
            return f"{attr}"

        return None

    def search_nested_discriminator(obj: Any) -> Optional[str]:
        """Recursively search for discriminator in nested structures."""
        # First try direct field lookup
        discriminator = get_field_discriminator(obj)
        if discriminator is not None:
            return discriminator

        # If it's a dict, search in nested values
        if isinstance(obj, dict):
            for value in obj.values():
                if isinstance(value, list):
                    # Search in list items
                    for item in value:
                        nested_discriminator = search_nested_discriminator(item)
                        if nested_discriminator is not None:
                            return nested_discriminator
                elif isinstance(value, dict):
                    # Search in nested dict
                    nested_discriminator = search_nested_discriminator(value)
                    if nested_discriminator is not None:
                        return nested_discriminator

        return None

    # A list model is searched element-by-element; first hit wins.
    if isinstance(model, list):
        for field in model:
            discriminator = search_nested_discriminator(field)
            if discriminator is not None:
                return discriminator

    discriminator = search_nested_discriminator(model)
    if discriminator is not None:
        return discriminator

    raise ValueError(f"Could not find discriminator field {fieldname} in {model}")


# --- file: packages/azure/src/mistralai/azure/client/utils/datetimes.py (continues on following lines) ---
# --- file: packages/azure/src/mistralai/azure/client/utils/datetimes.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from datetime import datetime
import sys


def parse_datetime(datetime_string: str) -> datetime:
    """
    Convert a RFC 3339 / ISO 8601 formatted string into a datetime object.
    Python versions 3.11 and later support parsing RFC 3339 directly with
    datetime.fromisoformat(), but for earlier versions, this function
    encapsulates the necessary extra logic.
    """
    # Python 3.11 and later can parse RFC 3339 directly
    if sys.version_info >= (3, 11):
        return datetime.fromisoformat(datetime_string)

    # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix,
    # so fix that upfront.
    if datetime_string.endswith("Z"):
        datetime_string = datetime_string[:-1] + "+00:00"

    return datetime.fromisoformat(datetime_string)


# --- file: packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from importlib import import_module
import builtins
import sys


def dynamic_import(package, modname, retries=3):
    """Import a module relative to package, retrying on KeyError from half-initialized modules."""
    for attempt in range(retries):
        try:
            return import_module(modname, package)
        except KeyError:
            # Evict the half-initialized entry so the retry starts clean.
            sys.modules.pop(modname, None)
            if attempt == retries - 1:
                break
    raise KeyError(f"Failed to import module '{modname}' after {retries} attempts")


def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None):
    """Module-level __getattr__ that lazily loads from a dynamic_imports mapping.

    Args:
        attr_name: The attribute being looked up.
        package: The caller's __package__ (for relative imports).
        dynamic_imports: Dict mapping attribute names to relative module paths.
        sub_packages: Optional list of subpackage names to lazy-load.
    """
    module_name = dynamic_imports.get(attr_name)
    if module_name is not None:
        try:
            module = dynamic_import(package, module_name)
            return getattr(module, attr_name)
        except ImportError as e:
            raise ImportError(
                f"Failed to import {attr_name} from {module_name}: {e}"
            ) from e
        except AttributeError as e:
            raise AttributeError(
                f"Failed to get {attr_name} from {module_name}: {e}"
            ) from e

    # Whole subpackages (not individual names) can also be lazy-loaded.
    if sub_packages and attr_name in sub_packages:
        return import_module(f".{attr_name}", package)

    raise AttributeError(f"module '{package}' has no attribute '{attr_name}'")


def lazy_dir(*, dynamic_imports, sub_packages=None):
    """Module-level __dir__ that lists lazily-loadable attributes."""
    lazy_attrs = builtins.list(dynamic_imports.keys())
    if sub_packages:
        lazy_attrs.extend(sub_packages)
    return builtins.sorted(lazy_attrs)


# --- file: packages/azure/src/mistralai/azure/client/utils/enums.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import enum
import sys
from typing import Any

from pydantic_core import core_schema


class OpenEnumMeta(enum.EnumMeta):
    # Metaclass for "open" enums: looking up an unknown value returns the raw
    # value instead of raising, so forward-compatible API responses survive.

    # The __call__ method `boundary` kwarg was added in 3.11 and must be present
    # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622
    # pylint: disable=unexpected-keyword-arg
    # The __call__ method `values` varg must be named for pyright.
    # pylint: disable=keyword-arg-before-vararg

    if sys.version_info >= (3, 11):
        def __call__(
            cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            # Functional-API creation (names given) keeps stock behavior.
            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )

            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )
            except ValueError:
                # Unknown member: pass the raw value through unchanged.
                return value
    else:
        def __call__(
            cls, value, names=None, *, module=None, qualname=None, type=None, start=1
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )

            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )
            except ValueError:
                return value

    def __new__(mcs, name, bases, namespace, **kwargs):
        cls = super().__new__(mcs, name, bases, namespace, **kwargs)

        # Add __get_pydantic_core_schema__ to make open enums work correctly
        # in union discrimination. In strict mode (used by Pydantic for unions),
        # only known enum values match. In lax mode, unknown values are accepted.
        def __get_pydantic_core_schema__(
            cls_inner: Any, _source_type: Any, _handler: Any
        ) -> core_schema.CoreSchema:
            # Create a validator that only accepts known enum values (for strict mode)
            def validate_strict(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                # Use the parent EnumMeta's __call__ which raises ValueError for unknown values
                return enum.EnumMeta.__call__(cls_inner, v)

            # Create a lax validator that accepts unknown values
            def validate_lax(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                try:
                    return enum.EnumMeta.__call__(cls_inner, v)
                except ValueError:
                    # Return the raw value for unknown enum values
                    return v

            # Determine the base type schema (str or int)
            # Walk the MRO: whichever of int/str appears first decides the
            # underlying primitive schema.
            is_int_enum = False
            for base in cls_inner.__mro__:
                if base is int:
                    is_int_enum = True
                    break
                if base is str:
                    break

            base_schema = (
                core_schema.int_schema()
                if is_int_enum
                else core_schema.str_schema()
            )

            # Use lax_or_strict_schema:
            # - strict mode: only known enum values match (raises ValueError for unknown)
            # - lax mode: accept any value, return enum member or raw value
            return core_schema.lax_or_strict_schema(
                lax_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_lax)]
                ),
                strict_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_strict)]
                ),
            )

        setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__))
        return cls


# --- file: packages/azure/src/mistralai/azure/client/utils/eventstreaming.py (module continues past this view) ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import re
import json
from dataclasses import dataclass, asdict
from typing import (
    Any,
    Callable,
    Generic,
    TypeVar,
    Optional,
    Generator,
    AsyncGenerator,
    Tuple,
)
import httpx

T = TypeVar("T")


class EventStream(Generic[T]):
    """Synchronous iterator over decoded server-sent events from an httpx response.

    Also usable as a context manager; exiting closes the underlying response.
    """

    # Holds a reference to the SDK client to avoid it being garbage collected
    # and cause termination of the underlying httpx client.
    client_ref: Optional[object]
    response: httpx.Response
    generator: Generator[T, None, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self.response = response
        self.generator = stream_events(response, decoder, sentinel)
        self.client_ref = client_ref
        self._closed = False

    def __iter__(self):
        return self

    def __next__(self):
        # Once closed via __exit__, stop iterating instead of reading a
        # closed response.
        if self._closed:
            raise StopIteration
        return next(self.generator)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._closed = True
        self.response.close()


class EventStreamAsync(Generic[T]):
    """Asynchronous counterpart of EventStream (async iterator / context manager)."""

    # Holds a reference to the SDK client to avoid it being garbage collected
    # and cause termination of the underlying httpx client.
    client_ref: Optional[object]
    response: httpx.Response
    generator: AsyncGenerator[T, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self.response = response
        self.generator = stream_events_async(response, decoder, sentinel)
        self.client_ref = client_ref
        self._closed = False

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self._closed:
            raise StopAsyncIteration
        return await self.generator.__anext__()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        self._closed = True
        await self.response.aclose()


@dataclass
class ServerEvent:
    """One parsed SSE message (id/event/data/retry fields)."""

    id: Optional[str] = None
    event: Optional[str] = None
    data: Any = None
    retry: Optional[int] = None


# Byte sequences that terminate one SSE message. Ordered longest-first so a
# longer separator is recognized before any of its shorter prefixes.
MESSAGE_BOUNDARIES = [
    b"\r\n\r\n",
    b"\r\n\r",
    b"\r\n\n",
    b"\r\r\n",
    b"\n\r\n",
    b"\r\r",
    b"\n\r",
    b"\n\n",
]

# UTF-8 byte-order mark; stripped if the stream starts with it.
UTF8_BOM = b"\xef\xbb\xbf"


async def stream_events_async(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> AsyncGenerator[T, None]:
    """Yield decoded events from `response`, stopping when `sentinel` data is seen.

    Buffers raw bytes and scans for message boundaries; `position` marks the
    start of the first unconsumed message within `buffer`.
    """
    buffer = bytearray()
    position = 0
    event_id: Optional[str] = None
    async for chunk in response.aiter_bytes():
        # Strip a leading BOM only at the very start of the stream.
        if len(buffer) == 0 and chunk.startswith(UTF8_BOM):
            chunk = chunk[len(UTF8_BOM) :]
        buffer += chunk
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            # Only CR/LF can begin a message boundary; check candidates there.
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            # One complete raw message: [position, i); skip the separator.
            block = buffer[position:i]
            position = i + len(seq)
            event, discard, event_id = _parse_event(
                raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id
            )
            if event is not None:
                yield event
            if discard:
                # Sentinel seen: close the response and end the stream.
                await response.aclose()
                return

        # Drop consumed bytes so the buffer does not grow unboundedly.
        if position > 0:
            buffer = buffer[position:]
            position = 0

    # Flush any final message that was not terminated by a boundary.
    event, discard, _ = _parse_event(
        raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id
    )
    if event is not None:
        yield event


def stream_events(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> Generator[T, None, None]:
    """Synchronous twin of stream_events_async; identical scanning logic."""
    buffer = bytearray()
    position = 0
    event_id: Optional[str] = None
    for chunk in response.iter_bytes():
        if len(buffer) == 0 and chunk.startswith(UTF8_BOM):
            chunk = chunk[len(UTF8_BOM) :]
        buffer += chunk
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            block = buffer[position:i]
            position = i + len(seq)
            event, discard, event_id = _parse_event(
                raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id
            )
            if event is not None:
                yield event
            if discard:
                response.close()
                return

        if position > 0:
            buffer = buffer[position:]
            position = 0

    event, discard, _ = _parse_event(
        raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id
    )
    if event is not None:
        yield event


def _parse_event(
    *,
    raw: bytearray,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
    event_id: Optional[str] = None,
) -> Tuple[Optional[T], bool, Optional[str]]:
    """Parse one raw SSE message block.

    Returns (decoded event or None, discard flag for the sentinel, last
    event id). `event_id` persists across messages, as SSE ids are sticky.
    """
    block = raw.decode()
    lines = re.split(r"\r?\n|\r", block)
    publish = False
    event = ServerEvent()
    data = ""
    for line in lines:
        if not line:
            continue

        delim = line.find(":")
        # A leading colon marks an SSE comment line; ignore it.
        if delim == 0:
            continue

        field = line
        value = ""
        if delim > 0:
            field = line[0:delim]
            # Colon as last char means an empty value.
            value = line[delim + 1 :] if delim < len(line) - 1 else ""
            # A single leading space after the colon is not part of the value.
            if len(value) and value[0] == " ":
                value = value[1:]

        if field == "event":
            event.event = value
            publish = True
        elif field == "data":
            # Multiple data lines accumulate, newline-joined.
            data += value + "\n"
            publish = True
        elif field == "id":
            publish = True
            # Ids containing NUL are ignored (per the SSE processing model).
            if "\x00" not in value:
                event_id = value
        elif field == "retry":
            if value.isdigit():
                event.retry = int(value)
                publish = True

    event.id = event_id

    # The sentinel (e.g. "[DONE]") terminates the stream without publishing.
    if sentinel and data == f"{sentinel}\n":
        return None, True, event_id

    if data:
        # Trim the trailing newline added by accumulation, then try JSON.
        data = data[:-1]
        try:
            event.data = json.loads(data)
        except json.JSONDecodeError:
            event.data = data

    out = None
    if publish:
        # Drop unset fields, but keep data even if it parsed to JSON null.
        out_dict = {
            k: v
            for k, v in asdict(event).items()
            if v is not None or (k == "data" and data)
        }
        out = decoder(json.dumps(out_dict))

    return out, False, event_id


def _peek_sequence(position: int, buffer: bytearray, sequence: bytes):
    """Return `sequence` if `buffer` contains it at `position`, else None."""
    if len(sequence) > (len(buffer) - position):
        return None

    for i, seq in enumerate(sequence):
        if buffer[position + i] != seq:
            return None

    return sequence
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Tuple,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .serializers import marshal_json

from .metadata import (
    FormMetadata,
    MultipartFormMetadata,
    find_field_metadata,
)
from .values import _is_set, _val_to_string


def _populate_form(
    field_name: str,
    explode: bool,
    obj: Any,
    delimiter: str,
    form: Dict[str, List[str]],
):
    """Serialize `obj` into `form` under `field_name` using form-style rules.

    With explode=True each member becomes its own entry; otherwise members
    are joined into one delimited string. Returns the (mutated) `form`.
    """
    if not _is_set(obj):
        return form

    if isinstance(obj, BaseModel):
        pairs = []
        model_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for field_key, model_field in model_fields.items():
            alias = model_field.alias if model_field.alias is not None else field_key
            if alias == "":
                continue

            field_val = getattr(obj, field_key)
            if not _is_set(field_val):
                continue

            if explode:
                form[alias] = [_val_to_string(field_val)]
            else:
                pairs.append(f"{alias}{delimiter}{_val_to_string(field_val)}")

        if pairs:
            form[field_name] = [delimiter.join(pairs)]
    elif isinstance(obj, Dict):
        pairs = []
        for key, value in obj.items():
            if not _is_set(value):
                continue

            if explode:
                form[key] = [_val_to_string(value)]
            else:
                pairs.append(f"{key}{delimiter}{_val_to_string(value)}")

        if pairs:
            form[field_name] = [delimiter.join(pairs)]
    elif isinstance(obj, List):
        pairs = []
        for value in obj:
            if not _is_set(value):
                continue

            if explode:
                # Entry is created lazily so a list of unset values leaves
                # the form untouched.
                form.setdefault(field_name, []).append(_val_to_string(value))
            else:
                pairs.append(_val_to_string(value))

        if pairs:
            form[field_name] = [delimiter.join(str(p) for p in pairs)]
    else:
        form[field_name] = [_val_to_string(obj)]

    return form


def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]:
    """Extract (file name, content, content type) from a generated file model.

    Raises ValueError when the model lacks a name or content.
    """
    model_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields

    file_name = ""
    content = None
    content_type = None

    for field_key, model_field in model_fields.items():
        meta = find_field_metadata(model_field, MultipartFormMetadata)
        if meta is None:
            continue

        if meta.content:
            content = getattr(file_obj, field_key, None)
        elif field_key == "content_type":
            content_type = getattr(file_obj, field_key, None)
        else:
            # Any remaining multipart-annotated field carries the file name.
            file_name = getattr(file_obj, field_key)

    if file_name == "" or content is None:
        raise ValueError("invalid multipart/form-data file")

    return file_name, content, content_type


def serialize_multipart_form(
    media_type: str, request: Any
) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]:
    """Split a pydantic request model into (media_type, form fields, file parts)."""
    form: Dict[str, Any] = {}
    files: List[Tuple[str, Any]] = []

    if not isinstance(request, BaseModel):
        raise TypeError("invalid request body type")

    model_fields: Dict[str, FieldInfo] = request.__class__.model_fields
    hints = get_type_hints(request.__class__)

    def _append_file(part_name, file_obj):
        # One multipart part per file; include the content type only when
        # the model supplied one.
        file_name, content, content_type = _extract_file_properties(file_obj)
        if content_type is not None:
            files.append((part_name, (file_name, content, content_type)))
        else:
            files.append((part_name, (file_name, content)))

    for field_key, model_field in model_fields.items():
        val = getattr(request, field_key)
        if not _is_set(val):
            continue

        meta = find_field_metadata(model_field, MultipartFormMetadata)
        if not meta:
            continue

        part_name = model_field.alias if model_field.alias else field_key

        if meta.file:
            # Arrays of files and single files share one code path.
            file_objs = val if isinstance(val, List) else [val]
            for file_obj in file_objs:
                if _is_set(file_obj):
                    _append_file(part_name, file_obj)
        elif meta.json:
            # JSON-typed fields are sent as an application/json part.
            files.append(
                (
                    part_name,
                    (
                        None,
                        marshal_json(val, hints[field_key]),
                        "application/json",
                    ),
                )
            )
        else:
            if isinstance(val, List):
                form[part_name] = [
                    _val_to_string(item) for item in val if _is_set(item)
                ]
            else:
                form[part_name] = _val_to_string(val)
    return media_type, form, files


def serialize_form_data(data: Any) -> Dict[str, Any]:
    """Serialize a pydantic model or plain dict for an urlencoded body.

    Raises ValueError on an unsupported form style and TypeError on an
    unsupported body type.
    """
    form: Dict[str, List[str]] = {}

    if isinstance(data, BaseModel):
        model_fields: Dict[str, FieldInfo] = data.__class__.model_fields
        hints = get_type_hints(data.__class__)
        for field_key, model_field in model_fields.items():
            val = getattr(data, field_key)
            if not _is_set(val):
                continue

            meta = find_field_metadata(model_field, FormMetadata)
            if meta is None:
                continue

            alias = model_field.alias if model_field.alias is not None else field_key

            if meta.json:
                form[alias] = [marshal_json(val, hints[field_key])]
            elif meta.style == "form":
                _populate_form(
                    alias,
                    meta.explode,
                    val,
                    ",",
                    form,
                )
            else:
                raise ValueError(f"Invalid form style for field {field_key}")
    elif isinstance(data, Dict):
        for key, value in data.items():
            if _is_set(value):
                form[key] = [_val_to_string(value)]
    else:
        raise TypeError(f"Invalid request body type {type(data)} for form data")

    return form
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    List,
    Optional,
)
from httpx import Headers
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    HeaderMetadata,
    find_field_metadata,
)

from .values import _is_set, _populate_from_globals, _val_to_string


def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]:
    """Build outgoing request headers from a params model plus optional globals.

    Globals already satisfied by `headers_params` are not populated twice.
    """
    headers: Dict[str, str] = {}

    populated_globals: List[str] = []
    if _is_set(headers_params):
        populated_globals = _populate_headers(headers_params, gbls, headers, [])
    if _is_set(gbls):
        _populate_headers(gbls, None, headers, populated_globals)

    return headers


def _populate_headers(
    headers_params: Any,
    gbls: Any,
    header_values: Dict[str, str],
    skip_fields: List[str],
) -> List[str]:
    """Fill `header_values` from a model; returns field names taken from globals."""
    populated: List[str] = []

    if not isinstance(headers_params, BaseModel):
        return populated

    model_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields
    for field_key, model_field in model_fields.items():
        if field_key in skip_fields:
            continue

        meta = find_field_metadata(model_field, HeaderMetadata)
        if meta is None:
            continue

        alias = model_field.alias if model_field.alias is not None else field_key

        value, from_globals = _populate_from_globals(
            field_key, getattr(headers_params, field_key), HeaderMetadata, gbls
        )
        if from_globals:
            populated.append(field_key)

        serialized = _serialize_header(meta.explode, value)
        if serialized != "":
            header_values[alias] = serialized

    return populated


def _serialize_header(explode: bool, obj: Any) -> str:
    """Serialize a value into a `simple`-style header string.

    Models and dicts become `k=v` pairs when exploded, or alternating
    `k,v` items otherwise; lists become comma-joined values.
    """
    if not _is_set(obj):
        return ""

    if isinstance(obj, BaseModel):
        parts: List[str] = []
        model_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for field_key, model_field in model_fields.items():
            if not find_field_metadata(model_field, HeaderMetadata):
                continue

            alias = model_field.alias if model_field.alias is not None else field_key

            val = getattr(obj, field_key)
            if not _is_set(val):
                continue

            if explode:
                parts.append(f"{alias}={_val_to_string(val)}")
            else:
                parts.extend((alias, _val_to_string(val)))

        return ",".join(parts) if parts else ""

    if isinstance(obj, Dict):
        parts = []
        for key, value in obj.items():
            if not _is_set(value):
                continue

            if explode:
                parts.append(f"{key}={_val_to_string(value)}")
            else:
                parts.extend((key, _val_to_string(value)))

        return ",".join(str(p) for p in parts) if parts else ""

    if isinstance(obj, List):
        parts = [_val_to_string(value) for value in obj if _is_set(value)]
        return ",".join(parts) if parts else ""

    return f"{_val_to_string(obj)}"


def get_response_headers(headers: Headers) -> Dict[str, List[str]]:
    """Collect response headers into a name -> list-of-values mapping."""
    res: Dict[str, List[str]] = {}
    for name, value in headers.items():
        res.setdefault(name, []).append(value)
    return res
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import httpx
from typing import Any, Protocol

from typing import Optional, Type, TypeVar, Union
from dataclasses import dataclass
from pydantic.fields import FieldInfo


class Logger(Protocol):
    """Structural type for the minimal debug logger the SDK needs."""

    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass


class NoOpLogger:
    """Logger that silently drops every message; used when none is configured."""

    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass


def get_body_content(req: httpx.Request) -> str:
    """Return the request body for logging without forcing it to be read.

    Reading `req.content` on a streaming request would consume the stream,
    so the body is only rendered when httpx has already cached `_content`.
    """
    return str(req.content) if hasattr(req, "_content") else ""


def get_default_logger() -> Logger:
    """Fallback logger used when the caller supplies none."""
    return NoOpLogger()


T = TypeVar("T")


@dataclass
class SecurityMetadata:
    """Describes how a field participates in a security scheme."""

    option: bool = False
    scheme: bool = False
    scheme_type: Optional[str] = None
    sub_type: Optional[str] = None
    field_name: Optional[str] = None

    def get_field_name(self, default: str) -> str:
        """Return the explicit field name, falling back to `default`."""
        return self.field_name or default


@dataclass
class ParamMetadata:
    """Base serialization settings shared by parameter kinds."""

    serialization: Optional[str] = None
    style: str = "simple"
    explode: bool = False


@dataclass
class PathParamMetadata(ParamMetadata):
    """Path parameters use the base `simple`/non-exploded defaults."""


@dataclass
class QueryParamMetadata(ParamMetadata):
    """Query parameters default to `form` style, exploded."""

    style: str = "form"
    explode: bool = True


@dataclass
class HeaderMetadata(ParamMetadata):
    """Header parameters use the base `simple`/non-exploded defaults."""


@dataclass
class RequestMetadata:
    """Request-body settings; defaults to an opaque byte stream."""

    media_type: str = "application/octet-stream"


@dataclass
class MultipartFormMetadata:
    """Marks how a field maps onto a multipart/form-data part."""

    file: bool = False
    content: bool = False
    json: bool = False


@dataclass
class FormMetadata:
    """Marks how a field maps onto an urlencoded form body."""

    json: bool = False
    style: str = "form"
    explode: bool = True


class FieldMetadata:
    """Aggregates per-kind metadata attached to a pydantic field annotation."""

    security: Optional[SecurityMetadata] = None
    path: Optional[PathParamMetadata] = None
    query: Optional[QueryParamMetadata] = None
    header: Optional[HeaderMetadata] = None
    request: Optional[RequestMetadata] = None
    form: Optional[FormMetadata] = None
    multipart: Optional[MultipartFormMetadata] = None

    def __init__(
        self,
        security: Optional[SecurityMetadata] = None,
        path: Optional[Union[PathParamMetadata, bool]] = None,
        query: Optional[Union[QueryParamMetadata, bool]] = None,
        header: Optional[Union[HeaderMetadata, bool]] = None,
        request: Optional[Union[RequestMetadata, bool]] = None,
        form: Optional[Union[FormMetadata, bool]] = None,
        multipart: Optional[Union[MultipartFormMetadata, bool]] = None,
    ):
        # A bare boolean is shorthand for "this parameter kind with defaults".
        def _norm(value, cls):
            return cls() if isinstance(value, bool) else value

        self.security = security
        self.path = _norm(path, PathParamMetadata)
        self.query = _norm(query, QueryParamMetadata)
        self.header = _norm(header, HeaderMetadata)
        self.request = _norm(request, RequestMetadata)
        self.form = _norm(form, FormMetadata)
        self.multipart = _norm(multipart, MultipartFormMetadata)


def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]:
    """Return the slot of a field's FieldMetadata matching `metadata_type`, if any."""
    metadata = find_metadata(field_info, FieldMetadata)
    if not metadata:
        return None

    return next(
        (v for v in metadata.__dict__.values() if isinstance(v, metadata_type)),
        None,
    )


def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]:
    """Return the first annotation-metadata entry that is a `metadata_type`."""
    return next(
        (md for md in field_info.metadata or [] if isinstance(md, metadata_type)),
        None,
    )
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Optional,
)

from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    QueryParamMetadata,
    find_field_metadata,
)
from .values import (
    _get_serialized_params,
    _is_set,
    _populate_from_globals,
    _val_to_string,
)
from .forms import _populate_form


def get_query_params(
    query_params: Any,
    gbls: Optional[Any] = None,
    allow_empty_value: Optional[List[str]] = None,
) -> Dict[str, List[str]]:
    """Build the query-parameter map from a params model plus optional globals.

    Args:
        query_params: Pydantic model whose fields carry QueryParamMetadata.
        gbls: Optional globals model used to backfill unset fields.
        allow_empty_value: Aliases that should be emitted as `?name=` even
            when their value is unset/empty.
    """
    params: Dict[str, List[str]] = {}

    globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value)
    if _is_set(gbls):
        _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value)

    return params


def _populate_query_params(
    query_params: Any,
    gbls: Any,
    query_param_values: Dict[str, List[str]],
    skip_fields: List[str],
    allow_empty_value: Optional[List[str]] = None,
) -> List[str]:
    """Serialize one model's fields into `query_param_values`.

    Returns the field names whose values were taken from globals so the
    caller can skip them when it later serializes the globals themselves.

    Raises:
        NotImplementedError: for an unsupported query-param style.
    """
    globals_already_populated: List[str] = []

    if not isinstance(query_params, BaseModel):
        return globals_already_populated

    param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields
    param_field_types = get_type_hints(query_params.__class__)
    # Loop-invariant: which aliases may appear with an empty value.
    allow_empty_set = set(allow_empty_value or [])
    for name in param_fields:
        if name in skip_fields:
            continue

        field = param_fields[name]

        metadata = find_field_metadata(field, QueryParamMetadata)
        if not metadata:
            continue

        value = getattr(query_params, name) if _is_set(query_params) else None

        value, global_found = _populate_from_globals(
            name, value, QueryParamMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)

        f_name = field.alias if field.alias is not None else name

        should_include_empty = f_name in allow_empty_set and (
            value is None or value == [] or value == ""
        )

        if should_include_empty:
            query_param_values[f_name] = [""]
            continue

        if metadata.serialization is not None:
            serialized_params = _get_serialized_params(
                metadata, f_name, value, param_field_types[name]
            )
            for param_key, param_value in serialized_params.items():
                # BUG FIX: the previous code called list.extend() with the
                # serialized value when the key was already present, which
                # appends a string character-by-character (and was
                # inconsistent with the `[value]` branch). Append the whole
                # value instead; it also shadowed the outer `value` variable.
                query_param_values.setdefault(param_key, []).append(param_value)
        else:
            style = metadata.style
            if style == "deepObject":
                _populate_deep_object_query_params(f_name, value, query_param_values)
            elif style == "form":
                _populate_delimited_query_params(
                    metadata, f_name, value, ",", query_param_values
                )
            elif style == "pipeDelimited":
                _populate_delimited_query_params(
                    metadata, f_name, value, "|", query_param_values
                )
            else:
                raise NotImplementedError(
                    f"query param style {style} not yet supported"
                )

    return globals_already_populated


def _populate_deep_object_query_params(
    field_name: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Dispatch deepObject serialization based on the top-level value type."""
    if not _is_set(obj):
        return

    if isinstance(obj, BaseModel):
        _populate_deep_object_query_params_basemodel(field_name, obj, params)
    elif isinstance(obj, Dict):
        _populate_deep_object_query_params_dict(field_name, obj, params)


def _populate_deep_object_query_params_basemodel(
    prior_params_key: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Serialize a model as deepObject params, e.g. `outer[inner]=value`."""
    if not _is_set(obj) or not isinstance(obj, BaseModel):
        return

    obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
    for name in obj_fields:
        obj_field = obj_fields[name]

        f_name = obj_field.alias if obj_field.alias is not None else name

        params_key = f"{prior_params_key}[{f_name}]"

        obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata)
        if not _is_set(obj_param_metadata):
            continue

        obj_val = getattr(obj, name)
        if not _is_set(obj_val):
            continue

        # Recurse for nested containers; scalars terminate the key path.
        if isinstance(obj_val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, obj_val, params)
        elif isinstance(obj_val, Dict):
            _populate_deep_object_query_params_dict(params_key, obj_val, params)
        elif isinstance(obj_val, List):
            _populate_deep_object_query_params_list(params_key, obj_val, params)
        else:
            params[params_key] = [_val_to_string(obj_val)]


def _populate_deep_object_query_params_dict(
    prior_params_key: str,
    value: Dict,
    params: Dict[str, List[str]],
):
    """Serialize a dict as deepObject params, keyed by `prior[key]`."""
    if not _is_set(value):
        return

    for key, val in value.items():
        if not _is_set(val):
            continue

        params_key = f"{prior_params_key}[{key}]"

        if isinstance(val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, val, params)
        elif isinstance(val, Dict):
            _populate_deep_object_query_params_dict(params_key, val, params)
        elif isinstance(val, List):
            _populate_deep_object_query_params_list(params_key, val, params)
        else:
            params[params_key] = [_val_to_string(val)]


def _populate_deep_object_query_params_list(
    params_key: str,
    value: List,
    params: Dict[str, List[str]],
):
    """Append each set list element under the same deepObject key."""
    if not _is_set(value):
        return

    for val in value:
        if not _is_set(val):
            continue

        if params.get(params_key) is None:
            params[params_key] = []

        params[params_key].append(_val_to_string(val))


def _populate_delimited_query_params(
    metadata: QueryParamMetadata,
    field_name: str,
    obj: Any,
    delimiter: str,
    query_param_values: Dict[str, List[str]],
):
    """Serialize `form`/`pipeDelimited` styles via the shared form helper."""
    _populate_form(
        field_name,
        metadata.explode,
        obj,
        delimiter,
        query_param_values,
    )
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import io
import re
from dataclasses import dataclass
from typing import (
    Any,
    Optional,
)

SERIALIZATION_METHOD_TO_CONTENT_TYPE = {
    "json": "application/json",
    "form": "application/x-www-form-urlencoded",
    "multipart": "multipart/form-data",
    "raw": "application/octet-stream",
    "string": "text/plain",
}


@dataclass
class SerializedRequestBody:
    """Carrier for a serialized body: exactly one of content/data/files is used."""

    media_type: Optional[str] = None
    content: Optional[Any] = None
    data: Optional[Any] = None
    files: Optional[Any] = None


def serialize_request_body(
    request_body: Any,
    nullable: bool,
    optional: bool,
    serialization_method: str,
    request_body_type,
) -> Optional[SerializedRequestBody]:
    """Serialize `request_body` per `serialization_method`.

    Returns None when the body is None and the parameter is optional but not
    nullable (i.e. simply omitted). Raises TypeError for a body type that
    cannot be serialized for the resolved media type.
    """
    # An absent optional (non-nullable) body means "send nothing".
    if request_body is None and not nullable and optional:
        return None

    media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method]
    result = SerializedRequestBody(media_type)

    if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None:
        # Imported lazily so this module has no import-time dependency cycle
        # with the serializer helpers.
        from .serializers import marshal_json

        result.content = marshal_json(request_body, request_body_type)
    elif re.match(r"^multipart\/.*", media_type) is not None:
        from .forms import serialize_multipart_form

        result.media_type, result.data, result.files = serialize_multipart_form(
            media_type, request_body
        )
    elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None:
        from .forms import serialize_form_data

        result.data = serialize_form_data(request_body)
    elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)):
        result.content = request_body
    elif isinstance(request_body, str):
        result.content = request_body
    else:
        raise TypeError(
            f"invalid request body type {type(request_body)} for mediaType {media_type}"
        )

    return result
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import asyncio
import random
import time
from datetime import datetime
from email.utils import parsedate_to_datetime
from typing import List, Optional

import httpx


class BackoffStrategy:
    """Exponential-backoff settings; all intervals are in milliseconds."""

    initial_interval: int
    max_interval: int
    exponent: float
    max_elapsed_time: int

    def __init__(
        self,
        initial_interval: int,
        max_interval: int,
        exponent: float,
        max_elapsed_time: int,
    ):
        self.initial_interval = initial_interval
        self.max_interval = max_interval
        self.exponent = exponent
        self.max_elapsed_time = max_elapsed_time


class RetryConfig:
    """Retry policy: strategy name, backoff settings, connection-error handling."""

    strategy: str
    backoff: BackoffStrategy
    retry_connection_errors: bool

    def __init__(
        self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool
    ):
        self.strategy = strategy
        self.backoff = backoff
        self.retry_connection_errors = retry_connection_errors


class Retries:
    """A RetryConfig plus the status codes (e.g. "500" or "5XX") that trigger retries."""

    config: RetryConfig
    status_codes: List[str]

    def __init__(self, config: RetryConfig, status_codes: List[str]):
        self.config = config
        self.status_codes = status_codes


class TemporaryError(Exception):
    """Raised internally for a retryable response; carries any Retry-After hint."""

    response: httpx.Response
    retry_after: Optional[int]

    def __init__(self, response: httpx.Response):
        self.response = response
        self.retry_after = _parse_retry_after_header(response)


class PermanentError(Exception):
    """Wraps an exception that must not be retried; `inner` is re-raised later."""

    inner: Exception

    def __init__(self, inner: Exception):
        self.inner = inner


def _parse_retry_after_header(response: httpx.Response) -> Optional[int]:
    """Parse Retry-After header from response.

    Returns:
        Retry interval in milliseconds, or None if header is missing or invalid.
    """
    retry_after_header = response.headers.get("retry-after")
    if not retry_after_header:
        return None

    # Numeric form: delay in seconds.
    try:
        seconds = float(retry_after_header)
        return round(seconds * 1000)
    except ValueError:
        pass

    # HTTP-date form: absolute time; clamp past dates to zero.
    try:
        retry_date = parsedate_to_datetime(retry_after_header)
        delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds()
        return round(max(0, delta) * 1000)
    except (ValueError, TypeError):
        pass

    return None


def _get_sleep_interval(
    exception: Exception,
    initial_interval: int,
    max_interval: int,
    exponent: float,
    retries: int,
) -> float:
    """Get sleep interval for retry with exponential backoff.

    A server-provided Retry-After hint takes precedence over the computed
    backoff; otherwise the interval grows exponentially with jitter and is
    capped at max_interval.

    Args:
        exception: The exception that triggered the retry.
        initial_interval: Initial retry interval in milliseconds.
        max_interval: Maximum retry interval in milliseconds.
        exponent: Base for exponential backoff calculation.
        retries: Current retry attempt count.

    Returns:
        Sleep interval in seconds.
    """
    if (
        isinstance(exception, TemporaryError)
        and exception.retry_after is not None
        and exception.retry_after > 0
    ):
        return exception.retry_after / 1000

    sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1)
    return min(sleep, max_interval / 1000)


def retry(func, retries: Retries):
    """Run `func` under the retry policy; only the "backoff" strategy retries.

    Responses matching `retries.status_codes` (exact codes or "NXX" ranges)
    are raised as TemporaryError so the backoff loop retries them; other
    exceptions become PermanentError and propagate unretried.
    """
    if retries.config.strategy == "backoff":

        def do_request() -> httpx.Response:
            res: httpx.Response
            try:
                res = func()

                for code in retries.status_codes:
                    if "X" in code.upper():
                        # Wildcard like "5XX": match the whole hundred-range.
                        code_range = int(code[0])

                        status_major = res.status_code / 100

                        if code_range <= status_major < code_range + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)

                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except (httpx.NetworkError, httpx.TimeoutException) as exception:
                # When enabled, re-raise so the backoff loop retries the
                # connection error; otherwise treat it as permanent.
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except TemporaryError:
                raise
            except Exception as exception:
                raise PermanentError(exception) from exception

            return res

        return retry_with_backoff(
            do_request,
            retries.config.backoff.initial_interval,
            retries.config.backoff.max_interval,
            retries.config.backoff.exponent,
            retries.config.backoff.max_elapsed_time,
        )

    return func()


async def retry_async(func, retries: Retries):
    """Async twin of retry(); `func` is awaited and backoff sleeps are async."""
    if retries.config.strategy == "backoff":

        async def do_request() -> httpx.Response:
            res: httpx.Response
            try:
                res = await func()

                for code in retries.status_codes:
                    if "X" in code.upper():
                        code_range = int(code[0])

                        status_major = res.status_code / 100

                        if code_range <= status_major < code_range + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)

                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except (httpx.NetworkError, httpx.TimeoutException) as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except TemporaryError:
                raise
            except Exception as exception:
                raise PermanentError(exception) from exception

            return res

        return await retry_with_backoff_async(
            do_request,
            retries.config.backoff.initial_interval,
            retries.config.backoff.max_interval,
            retries.config.backoff.exponent,
            retries.config.backoff.max_elapsed_time,
        )

    return await func()


def retry_with_backoff(
    func,
    initial_interval=500,
    max_interval=60000,
    exponent=1.5,
    max_elapsed_time=3600000,
):
    """Call `func` until it succeeds or the elapsed-time budget (ms) runs out.

    PermanentError unwraps to its inner exception immediately. When the
    budget is exhausted, the last retryable response is returned rather than
    raising, so callers still receive the server's reply.
    """
    start = round(time.time() * 1000)
    retries = 0

    while True:
        try:
            return func()
        except PermanentError as exception:
            raise exception.inner
        except Exception as exception:  # pylint: disable=broad-exception-caught
            now = round(time.time() * 1000)
            if now - start > max_elapsed_time:
                if isinstance(exception, TemporaryError):
                    return exception.response

                raise

            sleep = _get_sleep_interval(
                exception, initial_interval, max_interval, exponent, retries
            )
            time.sleep(sleep)
            retries += 1
retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/packages/azure/src/mistralai/azure/client/utils/security.py b/packages/azure/src/mistralai/azure/client/utils/security.py new file mode 100644 index 00000000..17996bd5 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/security.py @@ -0,0 +1,176 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import base64 +from typing import ( + Any, + Dict, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: 
SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value + elif sub_type == "custom": + return + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) 
-> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/azure/src/mistralai/azure/client/utils/serializers.py b/packages/azure/src/mistralai/azure/client/utils/serializers.py new file mode 100644 index 00000000..14321eb4 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/serializers.py @@ -0,0 +1,229 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if 
not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/packages/azure/src/mistralai/azure/client/utils/unions.py b/packages/azure/src/mistralai/azure/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..fe0c9b8e --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.azure.client import errors + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... 
+ + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise errors.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/packages/azure/src/mistralai/azure/client/utils/url.py b/packages/azure/src/mistralai/azure/client/utils/url.py new file mode 100644 index 00000000..c78ccbae --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/url.py @@ -0,0 +1,155 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if _is_set(gbls): + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = 
get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if _is_set(path_params) else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if not _is_set(param): + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if not _is_set(pp_val): + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if not _is_set(param[pp_key]): + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if not _is_set(param_field_val): + continue + if param_metadata.explode: + pp_vals.append( + 
f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + elif _is_set(param): + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/packages/azure/src/mistralai/azure/client/utils/values.py b/packages/azure/src/mistralai/azure/client/utils/values.py new file mode 100644 index 00000000..dae01a44 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/values.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +from functools import partial +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from ..types.basemodel import Unset + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + +def cast_partial(typ): + return partial(cast, typ) + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: 
+ if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params + + +def _is_set(value: Any) -> bool: + return value is not None and not isinstance(value, Unset) diff --git a/packages/azure/uv.lock b/packages/azure/uv.lock new file mode 100644 index 00000000..df3719ff --- /dev/null +++ b/packages/azure/uv.lock @@ -0,0 +1,482 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = 
"sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e6/e3/c4c8d473d6780ef1853d630d581f70d655b4f8d7553c6997958c283039a2/anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94", size = 163930, upload-time = "2024-05-26T22:02:15.75Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/a2/10639a79341f6c019dedc95bd48a4928eed9f1d1197f4c04f546fc7ae0ff/anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7", size = 86780, upload-time = "2024-05-26T22:02:13.671Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = 
"sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "certifi" +version = "2024.7.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/02/a95f2b11e207f68bc64d7aae9666fed2e2b3f307748d5123dffb72a1bbea/certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", size = 164065, upload-time = "2024-07-04T01:36:11.653Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/d5/c84e1a17bf61d4df64ca866a1c9a913874b4e9bdc131ec689a0ad013fb36/certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90", size = 162960, upload-time = "2024-07-04T01:36:09.038Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dill" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/17/4d/ac7ffa80c69ea1df30a8aa11b3578692a5118e7cd1aa157e3ef73b092d15/dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", size = 184847, upload-time = "2024-01-27T23:42:16.145Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252, upload-time = "2024-01-27T23:42:14.239Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883, upload-time = "2024-07-12T22:26:00.161Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453, upload-time = "2024-07-12T22:25:58.476Z" }, +] + 
+[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/ed/f86a79a07470cb07819390452f178b3bef1d375f2ec021ecfc709fc7cf07/idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", size = 189575, upload-time = "2024-04-11T03:34:43.276Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/3e/741d8c82801c347547f8a2a06aa57dbb1992be9e948df2ea0eda2c8b79e8/idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0", size = 66836, upload-time = "2024-04-11T03:34:41.447Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646, upload-time = "2023-01-07T11:08:11.254Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892, upload-time = "2023-01-07T11:08:09.864Z" }, +] + +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = 
"mistralai-azure" +version = "2.0.0rc1" +source = { editable = "." } +dependencies = [ + { name = "httpcore" }, + { name = "httpx" }, + { name = "pydantic" }, +] + +[package.dev-dependencies] +dev = [ + { name = "mypy" }, + { name = "pylint" }, + { name = "pyright" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, +] + +[package.metadata] +requires-dist = [ + { name = "httpcore", specifier = ">=1.0.9" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "pydantic", specifier = ">=2.11.2" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433, upload-time = "2023-02-04T12:11:27.157Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695, upload-time = "2023-02-04T12:11:25.002Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "packaging" +version = "24.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", size = 148788, upload-time = "2024-06-09T23:19:24.956Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985, upload-time = "2024-06-09T23:19:21.909Z" }, +] + 
+[[package]] +name = "platformdirs" +version = "4.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/52/0763d1d976d5c262df53ddda8d8d4719eedf9594d046f117c25a27261a19/platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3", size = 20916, upload-time = "2024-05-15T03:18:23.372Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/13/2aa1f0e1364feb2c9ef45302f387ac0bd81484e9c9a4c5688a322fbdfd08/platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", size = 18146, upload-time = "2024-05-15T03:18:21.209Z" }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" }, + { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" }, + { url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" }, + { url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" }, + { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" }, + { url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" }, + { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" }, + { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" }, + { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" }, + { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" }, + { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" }, + { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" }, + { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" }, + { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" }, + { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" }, + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, 
upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, + { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, + { url = 
"https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, + { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" }, + { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" }, + { url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" }, + { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" }, + { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" }, + { url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" }, + { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" }, + { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" }, + { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" }, + { url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" }, + { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" }, + { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" }, + { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" }, + { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.408" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = "2026-01-08T08:07:38.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" }, +] + +[[package]] +name = "pytest" +version = "8.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b4/8c/9862305bdcd6020bc7b45b1b5e7397a6caf1a33d3025b9a003b39075ffb2/pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce", size = 1439314, upload-time = 
"2024-07-25T10:40:00.159Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/f9/cf155cf32ca7d6fa3601bc4c5dd19086af4b320b706919d48a4c79081cf9/pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5", size = 341802, upload-time = "2024-07-25T10:39:57.834Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = 
"sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "tomli" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f", size = 15164, upload-time = "2022-02-08T10:54:04.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", size = 12757, upload-time = "2022-02-08T10:54:02.017Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4b/34/f5f4fbc6b329c948a90468dd423aaa3c3bfc1e07d5a76deec269110f2f6e/tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72", size = 191792, upload-time = "2024-07-10T09:25:56.381Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/7c/b753bf603852cab0a660da6e81f4ea5d2ca0f0b2b4870766d7aa9bceb7a2/tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264", size = 37770, upload-time = "2024-07-10T09:25:54.676Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222, upload-time = "2025-02-25T17:27:59.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125, upload-time = "2025-02-25T17:27:57.754Z" }, +] diff --git a/packages/gcp/.genignore b/packages/gcp/.genignore new file mode 100644 index 00000000..9a119b75 --- /dev/null +++ b/packages/gcp/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/gcp/client/sdk.py +src/mistralai/gcp/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/gcp/.gitattributes b/packages/gcp/.gitattributes new file mode 100644 
index 00000000..4d75d590 --- /dev/null +++ b/packages/gcp/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/packages/gcp/.gitignore b/packages/gcp/.gitignore new file mode 100644 index 00000000..b386de74 --- /dev/null +++ b/packages/gcp/.gitignore @@ -0,0 +1,15 @@ +.env +.env.local +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ +.speakeasy/reports +README-PYPI.md +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock new file mode 100644 index 00000000..e7932c9e --- /dev/null +++ b/packages/gcp/.speakeasy/gen.lock @@ -0,0 +1,803 @@ +lockVersion: 2.0.0 +id: ec60f2d8-7869-45c1-918e-773d41a8cf74 +management: + docChecksum: 58bc2dcdd83a2b7c4856971baa20641b + docVersion: 1.0.0 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0rc1 + configChecksum: 5b0554f4a04c849bc6f376bec69654aa + repoURL: https://github.com/mistralai/client-python.git + repoSubDirectory: packages/gcp + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp + published: true +persistentEdits: + generation_id: 724d8b32-d30d-4743-9e65-0bd450961ed2 + pristine_commit_hash: 7ef9f2e9ed9ed33b6f502afc01b1354bd0c499d1 + pristine_tree_hash: 19a1b62b3168a95e58538e2da4215028949b1ba2 +features: + python: + additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 + constsAndDefaults: 1.0.7 + core: 6.0.12 + defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 + envVarSecurityUsage: 0.3.2 + examples: 3.0.2 + flatRequests: 1.0.1 + globalSecurity: 3.0.5 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.2.0 + includes: 3.0.0 + methodArguments: 1.0.2 + 
nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.1.4 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:6374e05aeb66d48137d657acaa89527df2db35c6 + pristine_git_object: 8dbd4a82ad1d7725b9a6ce56daea208ca01b9210 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: 
sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:e23cf88a5a9b0c99e68d06a8450b8bfb9aee33a2 + pristine_git_object: db76b6c81a71607f94c212a542fe30e082053a90 + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: 
sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 + pristine_git_object: fde0b625c29340e8dce1eb3026ce644b1885e53a + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: sha1:f8755bc554dd44568c42eb5b6dde04db464647ab + pristine_git_object: ba62d854f030390418597cbd8febae0e1ce27ea8 + docs/models/fimcompletionstreamrequeststop.md: + id: c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: 
b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:30b72826963e22cadf76ac0b7604288dbc4fb943 + pristine_git_object: a84dac32b99390e3fd0559714ca43795742192c6 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + 
pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: 
sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + 
last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 + scripts/prepare_readme.py: + id: e0c5957a6035 + last_write_checksum: sha1:eb988bc0e00ed4bb14e9a3572845af14f06c9b42 + pristine_git_object: ae27b555c05c3c9f35d84e8bbe6a7c9f80cf94b2 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/gcp/client/__init__.py: + id: 4f63decd432e + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 + src/mistralai/gcp/client/_hooks/__init__.py: + id: adcb191838d1 + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai/gcp/client/_hooks/sdkhooks.py: + id: 7e23394c3f65 + last_write_checksum: sha1:4a03a16da35168f25ed0cccfdb0d4c4d86bbe242 + pristine_git_object: 2af4deeda8055f4c57c0c7f00a7b79033435cf34 + src/mistralai/gcp/client/_hooks/types.py: + id: 4f37fd18bfd9 + last_write_checksum: sha1:2b295cc28d5fa2c79495510c8b97a1ea60f993e0 + pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 + src/mistralai/gcp/client/_version.py: + id: f87319e32c7b + last_write_checksum: sha1:05656d6552e7c9cc97c9bbe6483ee906050d28ea + pristine_git_object: 7415341f2a4519b074f4b8ffa0f06c5ac2ac45e3 + src/mistralai/gcp/client/basesdk.py: + id: 4d594572857b + last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991 + pristine_git_object: b3edcb0aca1882d0cbe4d499cfba9cb5464c5b58 + src/mistralai/gcp/client/chat.py: + id: 4c41f05f786e + last_write_checksum: sha1:60b2697e2ecfb62eebed910007e62ab1df565eec + pristine_git_object: 
925d69eda2fdac458045cc12327ca72997e07600 + src/mistralai/gcp/client/errors/__init__.py: + id: c51c8ed21629 + last_write_checksum: sha1:29f08ad600a712ff572843a250839ef92efac19b + pristine_git_object: 00c8ee0031486b5416bb6745397c463e1a5dbba6 + src/mistralai/gcp/client/errors/httpvalidationerror.py: + id: b0e25f1c36bd + last_write_checksum: sha1:c863914ed6704ee6c3ad99a77d8b1e742de069d0 + pristine_git_object: 598068197b9ed7e7756de01325f7967a719e46ea + src/mistralai/gcp/client/errors/mistralgcperror.py: + id: 9a9cad8f5d36 + last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e + pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + src/mistralai/gcp/client/errors/no_response_error.py: + id: 2d3e5fe56122 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/gcp/client/errors/responsevalidationerror.py: + id: 98f7bac284be + last_write_checksum: sha1:1b835d2ce8754b22d5fa269077d7a2eec11d7f29 + pristine_git_object: e8bd83c19b0629bb0ddf7a240e9b8371cb33fff3 + src/mistralai/gcp/client/errors/sdkerror.py: + id: c53aee73c8e1 + last_write_checksum: sha1:080933e9f354b675988a132813f23e55f9e5db74 + pristine_git_object: 6980924626fa5fbf67fb62a30fd23d5883dbe650 + src/mistralai/gcp/client/fim.py: + id: 13d2d208e0ef + last_write_checksum: sha1:1027165887446ce0764ad542ca52f61b460c71b8 + pristine_git_object: 4202102ae5218784a10ee93ada5a0643d23a1d0c + src/mistralai/gcp/client/httpclient.py: + id: a53dd7be6a4c + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai/gcp/client/models/__init__.py: + id: d9e976d01972 + last_write_checksum: sha1:97ddfc7f70abd5e1a0b36be6dce209b69e9d5c73 + pristine_git_object: 575f64040c90152e74954b749ea89bce5a07e02e + src/mistralai/gcp/client/models/assistantmessage.py: + id: d39c4bdd289e + last_write_checksum: sha1:c813783bcbeec4e40f12e007d1dde4aed8ec71cf 
+ pristine_git_object: 702ac4708abb95fc18d138500b8353715c2dbc98 + src/mistralai/gcp/client/models/chatcompletionchoice.py: + id: 8e65b56f3e6d + last_write_checksum: sha1:e6d1382e9f880b866130d900fd866997aaf80e45 + pristine_git_object: ae5a2fbf38afbd86233dcaa8aa1c8441f5ed9eba + src/mistralai/gcp/client/models/chatcompletionrequest.py: + id: 4694a31c0003 + last_write_checksum: sha1:80fcbbcde773c22c93cf2db63beef2cfe3777497 + pristine_git_object: 8229c5bb13ded84039f3d8ddb95ac0a9c184e1bd + src/mistralai/gcp/client/models/chatcompletionresponse.py: + id: dd9e4796fca9 + last_write_checksum: sha1:76d7257583389ff5021e320a8f9a45a6deb07c7c + pristine_git_object: 317c4d84e378c14294d58c5aefd8c55ffe28754a + src/mistralai/gcp/client/models/chatcompletionstreamrequest.py: + id: 7294862af8ea + last_write_checksum: sha1:899210f881bdbe0a0d94e29fe7044fabbccc578c + pristine_git_object: 3c228d2e7edf08c36f310e190a8dedc7b4958459 + src/mistralai/gcp/client/models/completionchunk.py: + id: 6b9ed8c30877 + last_write_checksum: sha1:f1f091e94e3c1c1aefd3c3bb60c8de8236ab0ead + pristine_git_object: a0b1ae2fa3109a2c2b76bbc483b691d88dc9a15c + src/mistralai/gcp/client/models/completionevent.py: + id: 3f55c4b8fc75 + last_write_checksum: sha1:66665d921fd27df6ef0efce996a5446e49b989d8 + pristine_git_object: bb1550093ce9adcb9bcd0548b69796e82f4f260b + src/mistralai/gcp/client/models/completionresponsestreamchoice.py: + id: ad9b98ca7e1c + last_write_checksum: sha1:c4f9d733461bdb9a0d6c96e82212de7dddc04ffe + pristine_git_object: e58d4c88009ed3696d2a3a57f3796d8fb067019d + src/mistralai/gcp/client/models/contentchunk.py: + id: 8714d3bf2698 + last_write_checksum: sha1:acab1b53b1d324544c6aa6c4126a3fb5265278d2 + pristine_git_object: 18d481505e17d2125e380d796b0c406b0e66d601 + src/mistralai/gcp/client/models/deltamessage.py: + id: 404fc85f1a4c + last_write_checksum: sha1:982c2d15a570c7f4d5e1c3b012db46ea3bac609b + pristine_git_object: 63e6a7f3e50c138f235f5a36277aa8668f85cef1 + 
src/mistralai/gcp/client/models/fimcompletionrequest.py: + id: 5b79e2595d31 + last_write_checksum: sha1:80a2e3d5e10c240869cd96c41936d714cf8bf801 + pristine_git_object: e460f76c59315c22c75194936f1f3b232331f83c + src/mistralai/gcp/client/models/fimcompletionresponse.py: + id: 402f602d29b8 + last_write_checksum: sha1:cfe26848c7b14d6e374b7944d7ad44df822990b0 + pristine_git_object: 5b80da3f03e4e99dfca971a53af1cf6472c889bb + src/mistralai/gcp/client/models/fimcompletionstreamrequest.py: + id: 31190cf25070 + last_write_checksum: sha1:a95ab8c20b2fdff48102f08258a556af9f382ffa + pristine_git_object: fffc305499e578f77e42fb7992b59e933ae0ae7c + src/mistralai/gcp/client/models/function.py: + id: 2285a899b32e + last_write_checksum: sha1:6439f7f781174ae56b2b02ccbb4d02b08d8d5a03 + pristine_git_object: 439e831355444e0f9e82d23636651201f0db4bfc + src/mistralai/gcp/client/models/functioncall.py: + id: 17bb51f08e5f + last_write_checksum: sha1:b5fe2f061ea5f47057ee50011babc80de27e0ee6 + pristine_git_object: 0f1b24251ce728b3c2a0fb9e9ca94f90a9c3b7be + src/mistralai/gcp/client/models/functionname.py: + id: 313a6001145f + last_write_checksum: sha1:fe1eefaed314efa788bd15beb63bf6b81abb307e + pristine_git_object: 585b9e39762e49356823e211ad86f701bca389b8 + src/mistralai/gcp/client/models/imagedetail.py: + id: a28b2f3e2cb5 + last_write_checksum: sha1:a4874529961952019eaa86a2fa0989626f537a4c + pristine_git_object: 68ed76080716eb1424b13f182479f57e51a4fabf + src/mistralai/gcp/client/models/imageurl.py: + id: 4e330f3eae74 + last_write_checksum: sha1:6c0bee7d7c765fb2611131c7d270041671b428b8 + pristine_git_object: 903d0a1a45eeb7c5e8cde80f624b6e039de1f4cc + src/mistralai/gcp/client/models/imageurlchunk.py: + id: e68a4a393e9b + last_write_checksum: sha1:eae1d0e69a90b2f7513492e4cd0ed68d647f0b5d + pristine_git_object: 4bec0eec882c1eeee8a80f663ff7d686ca677ea0 + src/mistralai/gcp/client/models/jsonschema.py: + id: 39c6e7d412a0 + last_write_checksum: sha1:19b34a5e3f5c00d1a1b96f91a6e02f5ad12240c7 + 
pristine_git_object: 684ac09f0460bef1f26bf0030b79bbc7141ab99b + src/mistralai/gcp/client/models/mistralpromptmode.py: + id: 8be4a4a683e4 + last_write_checksum: sha1:c958567e95490abf3941fde69be69733e8afb90e + pristine_git_object: c765e4f1a0b86735255771231377f13d62f3d7a6 + src/mistralai/gcp/client/models/prediction.py: + id: 7a5463285bc8 + last_write_checksum: sha1:67c4a9b06d3e98552409a26960e0afd64f829b53 + pristine_git_object: 2e325289fd6c2a987ad270fd808f7b9a3f423440 + src/mistralai/gcp/client/models/referencechunk.py: + id: 523e477f8725 + last_write_checksum: sha1:aade1dc05c2a2672630eb17626e4f49367d6bfe6 + pristine_git_object: 261c4755641093a38f97b17dce3a387623e69ead + src/mistralai/gcp/client/models/responseformat.py: + id: 06774bb65b42 + last_write_checksum: sha1:7e64de46ef34718003cf0d198868a193f2122178 + pristine_git_object: f3aa9930e0f8a009dac628300d66c6209a538031 + src/mistralai/gcp/client/models/responseformats.py: + id: 18112ad0f6db + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai/gcp/client/models/security.py: + id: 7e13bda8273b + last_write_checksum: sha1:7086e929823d4eefe80cc279b605adfc8bbb08aa + pristine_git_object: 10a469b54d5e03873fb7d7d98627f2376c93d484 + src/mistralai/gcp/client/models/systemmessage.py: + id: 6537664d2d1b + last_write_checksum: sha1:779cb07cfd63ebe9eec496177cf1a8f5c077e417 + pristine_git_object: b3795c4bf4e97853979e0042cf4bd151d60ef974 + src/mistralai/gcp/client/models/systemmessagecontentchunks.py: + id: e120a6469c89 + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b + src/mistralai/gcp/client/models/textchunk.py: + id: a134f120d4dc + last_write_checksum: sha1:1ccc7d232136d6278d670542d192f36f46862df1 + pristine_git_object: 690322725c0f852a005d08c5b722c41709868b22 + src/mistralai/gcp/client/models/thinkchunk.py: + id: 59a1d1ef2020 + last_write_checksum: 
sha1:066eeb10de301264e601a9ec64d21e1cc13b0c20 + pristine_git_object: 33ec83949499d99a28c55bb20429ab948bb5b1e8 + src/mistralai/gcp/client/models/tool.py: + id: 4b27d45e56ad + last_write_checksum: sha1:cb0d879a55218fd7753bdd005be8a155982feb8f + pristine_git_object: 670aa81f8767e7c079105cf5995225168b4d6eb6 + src/mistralai/gcp/client/models/toolcall.py: + id: e6c25869a579 + last_write_checksum: sha1:f88e69a8e352025ca4b6897f6c16e1f7e4cd7264 + pristine_git_object: 3ea8e283c8f695bcc1fbc734b0074d37c2efeac8 + src/mistralai/gcp/client/models/toolchoice.py: + id: cb13a9f64c92 + last_write_checksum: sha1:71be72b1aae19aef1f8a461c89b71ad6daa009b7 + pristine_git_object: 6e795fd72792f740c8aa5b4da7d1f516018f2c2e + src/mistralai/gcp/client/models/toolchoiceenum.py: + id: d62e9c92d93c + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai/gcp/client/models/toolmessage.py: + id: b3774786c2e9 + last_write_checksum: sha1:3d414da8132467d1472ebe485802ffc78eb6f7e4 + pristine_git_object: ce160391f37ce3568daf2877f8dc1aa0f3694821 + src/mistralai/gcp/client/models/tooltypes.py: + id: 5926c64f5229 + last_write_checksum: sha1:ffd576511eed9f823c3d67df9fc5574d8d53c54b + pristine_git_object: fd1aa13d7b8c5d9bdb0922e04b8bd653ff843f60 + src/mistralai/gcp/client/models/usageinfo.py: + id: 3aab1af66cff + last_write_checksum: sha1:c0c949ac48ed35efe1e8fbf820b8e390edd9c3ce + pristine_git_object: cb6feb6e8d173d39b828d8f5b38af75173b4f7f2 + src/mistralai/gcp/client/models/usermessage.py: + id: 9cfa7260463e + last_write_checksum: sha1:780984241b84a7dfe1f6ad6eccace1204bfec8bd + pristine_git_object: e237e900421a9e65fd15aede29ade0e510b189f6 + src/mistralai/gcp/client/models/validationerror.py: + id: 6b4f4910ea9c + last_write_checksum: sha1:2792fd656f55519902f37670fb9fb3b43b4aa016 + pristine_git_object: 2d330e9acb579cc4928fa27fdd72288ce8832b8b + src/mistralai/gcp/client/py.typed: + id: 98b8ab80ab0d + last_write_checksum: 
sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/gcp/client/sdkconfiguration.py: + id: 57be0f79ea1e + last_write_checksum: sha1:0c5905e7c6092f57c15ee4318a85c0985bcc1ccf + pristine_git_object: d56a634f688f6697ba84962381084dc2d0836ac9 + src/mistralai/gcp/client/types/__init__.py: + id: f7ef15ac2ba1 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai/gcp/client/types/basemodel.py: + id: 24babf758c19 + last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 + pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + src/mistralai/gcp/client/utils/__init__.py: + id: a30c8ff6dcff + last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 + src/mistralai/gcp/client/utils/annotations.py: + id: 9b2cd4ffc6e9 + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai/gcp/client/utils/datetimes.py: + id: dd1f0f91ea9d + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/gcp/client/utils/dynamic_imports.py: + id: 0091051cb000 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 + src/mistralai/gcp/client/utils/enums.py: + id: 2341407d5443 + last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d + pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + src/mistralai/gcp/client/utils/eventstreaming.py: + id: bb66f0c3e0dc + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b + src/mistralai/gcp/client/utils/forms.py: + id: ebf34781d6bd + last_write_checksum: 
sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d + src/mistralai/gcp/client/utils/headers.py: + id: 4c369582903e + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai/gcp/client/utils/logger.py: + id: 082d86b60820 + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai/gcp/client/utils/metadata.py: + id: ff0e832b8b9c + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai/gcp/client/utils/queryparams.py: + id: 133b8408e73e + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai/gcp/client/utils/requestbodies.py: + id: 1be13a660954 + last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 + pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + src/mistralai/gcp/client/utils/retries.py: + id: 542ebd75b79b + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e + src/mistralai/gcp/client/utils/security.py: + id: 5273152365f4 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed + src/mistralai/gcp/client/utils/serializers.py: + id: a7836e553d41 + last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 + pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/gcp/client/utils/unions.py: + id: 8abba1cf1b6d + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e + src/mistralai/gcp/client/utils/unmarshal_json_response.py: + id: d972d22cf934 + last_write_checksum: 
sha1:5c75fb4ee04ae80a350ceb96abf4e1fdb255ee6c + pristine_git_object: ead3e5a00171b3a97af5112b6cd9ece698ce74f5 + src/mistralai/gcp/client/utils/url.py: + id: 0d311bbcb8f8 + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai/gcp/client/utils/values.py: + id: 328207e9ae81 + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} + fim_completion_v1_fim_completions_post: + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} +examplesVersion: 1.0.2 +generatedTests: {} +generatedFiles: + - .gitattributes + - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/jsonschema.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/mistralpromptmode.md + - docs/models/prediction.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - 
docs/models/systemmessagecontentchunks.md + - docs/models/textchunk.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - py.typed + - pylintrc + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai_gcp/__init__.py + - src/mistralai_gcp/_hooks/__init__.py + - src/mistralai_gcp/_hooks/sdkhooks.py + - src/mistralai_gcp/_hooks/types.py + - src/mistralai_gcp/_version.py + - src/mistralai_gcp/basesdk.py + - src/mistralai_gcp/chat.py + - src/mistralai_gcp/fim.py + - src/mistralai_gcp/httpclient.py + - src/mistralai_gcp/models/__init__.py + - src/mistralai_gcp/models/assistantmessage.py + - src/mistralai_gcp/models/chatcompletionchoice.py + - src/mistralai_gcp/models/chatcompletionrequest.py + - src/mistralai_gcp/models/chatcompletionresponse.py + - src/mistralai_gcp/models/chatcompletionstreamrequest.py + - src/mistralai_gcp/models/completionchunk.py + - src/mistralai_gcp/models/completionevent.py + - src/mistralai_gcp/models/completionresponsestreamchoice.py + - src/mistralai_gcp/models/contentchunk.py + - src/mistralai_gcp/models/deltamessage.py + - src/mistralai_gcp/models/fimcompletionrequest.py + - src/mistralai_gcp/models/fimcompletionresponse.py + - src/mistralai_gcp/models/fimcompletionstreamrequest.py + - src/mistralai_gcp/models/function.py + - src/mistralai_gcp/models/functioncall.py + - src/mistralai_gcp/models/functionname.py + - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/imageurl.py + - 
src/mistralai_gcp/models/imageurlchunk.py + - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/mistralgcperror.py + - src/mistralai_gcp/models/mistralpromptmode.py + - src/mistralai_gcp/models/no_response_error.py + - src/mistralai_gcp/models/prediction.py + - src/mistralai_gcp/models/referencechunk.py + - src/mistralai_gcp/models/responseformat.py + - src/mistralai_gcp/models/responseformats.py + - src/mistralai_gcp/models/responsevalidationerror.py + - src/mistralai_gcp/models/sdkerror.py + - src/mistralai_gcp/models/security.py + - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/systemmessagecontentchunks.py + - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/thinkchunk.py + - src/mistralai_gcp/models/tool.py + - src/mistralai_gcp/models/toolcall.py + - src/mistralai_gcp/models/toolchoice.py + - src/mistralai_gcp/models/toolchoiceenum.py + - src/mistralai_gcp/models/toolmessage.py + - src/mistralai_gcp/models/tooltypes.py + - src/mistralai_gcp/models/usageinfo.py + - src/mistralai_gcp/models/usermessage.py + - src/mistralai_gcp/models/validationerror.py + - src/mistralai_gcp/py.typed + - src/mistralai_gcp/sdkconfiguration.py + - src/mistralai_gcp/types/__init__.py + - src/mistralai_gcp/types/basemodel.py + - src/mistralai_gcp/utils/__init__.py + - src/mistralai_gcp/utils/annotations.py + - src/mistralai_gcp/utils/datetimes.py + - src/mistralai_gcp/utils/enums.py + - src/mistralai_gcp/utils/eventstreaming.py + - src/mistralai_gcp/utils/forms.py + - src/mistralai_gcp/utils/headers.py + - src/mistralai_gcp/utils/logger.py + - src/mistralai_gcp/utils/metadata.py + - src/mistralai_gcp/utils/queryparams.py + - src/mistralai_gcp/utils/requestbodies.py + - src/mistralai_gcp/utils/retries.py + - src/mistralai_gcp/utils/security.py + - src/mistralai_gcp/utils/serializers.py + - src/mistralai_gcp/utils/unmarshal_json_response.py + - src/mistralai_gcp/utils/url.py + - src/mistralai_gcp/utils/values.py diff --git 
a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml new file mode 100644 index 00000000..35a47062 --- /dev/null +++ b/packages/gcp/.speakeasy/gen.yaml @@ -0,0 +1,88 @@ +configVersion: 2.0.0 +generation: + sdkClassName: MistralGCP + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + sdkInitStyle: constructor + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + nameResolutionFeb2025: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + securityFeb2025: true + sharedErrorComponentsApr2025: true + sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true + auth: + oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + versioningStrategy: automatic + persistentEdits: {} + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false +python: + version: 2.0.0rc1 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + main: {} + allowedRedefinedBuiltins: + - id + - object + - input + - dir + asyncMode: both + authors: + - Mistral + baseErrorName: MistralGCPError + clientServerStatusCodesAsErrors: true + constFieldCasing: normal + defaultErrorName: SDKError + description: Python Client SDK for the Mistral AI API in GCP. 
+ enableCustomCodeRegions: false + enumFormat: union + fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true + responseRequiredSep2024: true + flatAdditionalProperties: true + flattenGlobalSecurity: true + flattenRequests: true + flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only + imports: + option: openapi + paths: + callbacks: "" + errors: errors + operations: "" + shared: "" + webhooks: "" + inferUnionDiscriminators: true + inputModelSuffix: input + license: "" + maxMethodParams: 999 + methodArguments: infer-optional-args + moduleName: mistralai.gcp.client + multipartArrayFormat: standard + outputModelSuffix: output + packageManager: uv + packageName: mistralai-gcp + preApplyUnionDiscriminators: true + pytestFilterWarnings: [] + pytestTimeout: 0 + responseFormat: flat + sseFlatResponse: false + templateVersion: v2 + useAsyncHooks: false diff --git a/packages/gcp/CONTRIBUTING.md b/packages/gcp/CONTRIBUTING.md new file mode 100644 index 00000000..d585717f --- /dev/null +++ b/packages/gcp/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. 
This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - This can be collected, for example, by running the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. + +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! + +The Maintainers diff --git a/packages/gcp/README.md b/packages/gcp/README.md new file mode 100644 index 00000000..5b66766b --- /dev/null +++ b/packages/gcp/README.md @@ -0,0 +1,428 @@ +# Mistral on GCP Python Client + + +**Prerequisites** + +Before you begin, you will need to create a Google Cloud project and enable the Mistral API. To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/). + +To run this locally you will also need to ensure you are authenticated with Google Cloud. You can do this by running + +```bash +gcloud auth application-default login +``` + +## SDK Installation + +Install the extra dependencies specific to Google Cloud: + +```bash +pip install mistralai[gcp] +``` + + +## SDK Example Usage + +### Create Chat Completions + +This example shows how to create chat completions. 
+ +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + +```python +# Synchronous Example +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.gcp.client import MistralGCP + +async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL + s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model="mistral-small-2503") + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + + + +## Available Resources and Operations + +### [chat](docs/sdks/chat/README.md) + +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion + +### [fim](docs/sdks/fim/README.md) + +* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion + + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://wiki.python.org/moin/Generators + + + +## Retries + +Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. + +To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: +```python +import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model="mistral-small-2503", + retries=RetryConfig( + "backoff", + BackoffStrategy(1, 50, 1.1, 100), + False + ) +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: +```python +import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), + retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model="mistral-small-2503", +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Error Handling + +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client import models + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = None +try: + res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model="mistral-small-2503", + ) + +except models.HTTPValidationError as e: + # handle exception + raise e +except models.SDKError as e: + # handle exception + raise e + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Override Server URL Per-Client + +The SDK automatically constructs the Vertex AI endpoint from `project_id` and `region`: +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model="mistral-small-2503", +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocols ensuring that the client has the necessary methods to make API calls. +This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
+ +For example, you could specify a header for every request that this sdk makes as follows: +```python +import os +from mistralai.gcp.client import MistralGCP +import httpx + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region="us-central1", + client=http_client, +) +``` + +or you could wrap the client with your own custom logic: +```python +from typing import Any, Optional, Union +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, 
+ cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = MistralGCP( + project_id="", + region="us-central1", + async_client=CustomClient(httpx.AsyncClient()), +) +``` + + + +## Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | +| --------- | ---- | ----------- | +| `api_key` | http | HTTP Bearer | + +The SDK automatically handles GCP authentication via `google.auth.default()`. Tokens are auto-refreshed when they expire. For example: +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model="mistral-small-2503", +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/gcp/RELEASES.md b/packages/gcp/RELEASES.md new file mode 100644 index 00000000..2261c857 --- /dev/null +++ b/packages/gcp/RELEASES.md @@ -0,0 +1,31 @@ + + +## 2026-01-12 16:00:24 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] packages/mistralai_gcp +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai-gcp/1.7.0 - packages/mistralai_gcp + +## 2026-02-25 17:36:50 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/gcp +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp + +## 2026-03-02 12:45:41 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0rc1] packages/gcp +### Releases +- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai-gcp/2.0.0rc1 - packages/gcp \ No newline at end of file diff --git a/packages/gcp/USAGE.md b/packages/gcp/USAGE.md new file mode 100644 index 00000000..3156349d --- /dev/null +++ b/packages/gcp/USAGE.md @@ -0,0 +1,61 @@ + +### Create Chat Completions + +This example shows how to create chat completions. 
+ +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + +```python +# Synchronous Example +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.gcp.client import MistralGCP + +async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL + s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model="mistral-small-2503") + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/gcp/docs/errors/httpvalidationerror.md b/packages/gcp/docs/errors/httpvalidationerror.md new file mode 100644 index 00000000..712a148c --- /dev/null +++ b/packages/gcp/docs/errors/httpvalidationerror.md @@ -0,0 +1,8 @@ +# HTTPValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/arguments.md b/packages/gcp/docs/models/arguments.md new file mode 100644 index 00000000..2e54e27e --- /dev/null +++ b/packages/gcp/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/gcp/docs/models/assistantmessage.md b/packages/gcp/docs/models/assistantmessage.md new file mode 100644 index 00000000..9ef63837 --- 
/dev/null +++ b/packages/gcp/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/packages/gcp/docs/models/assistantmessagecontent.md b/packages/gcp/docs/models/assistantmessagecontent.md new file mode 100644 index 00000000..047b7cf9 --- /dev/null +++ b/packages/gcp/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionchoice.md b/packages/gcp/docs/models/chatcompletionchoice.md new file mode 100644 index 00000000..deaa0ea0 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionchoicefinishreason.md b/packages/gcp/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 00000000..b2f15ecb --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## Values + +| Name | Value | +| 
-------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionrequest.md b/packages/gcp/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..8dbd4a82 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequest.md @@ -0,0 +1,25 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionrequestmessage.md b/packages/gcp/docs/models/chatcompletionrequestmessage.md new file mode 100644 index 00000000..91e9e062 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionrequeststop.md b/packages/gcp/docs/models/chatcompletionrequeststop.md new file mode 100644 index 00000000..749296d4 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..dc82a8ef --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestToolChoice + +Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionresponse.md b/packages/gcp/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..a0465ffb --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionstreamrequest.md b/packages/gcp/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..db76b6c8 --- /dev/null 
+++ b/packages/gcp/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,25 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md new file mode 100644 index 00000000..2e4e93ac --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionstreamrequeststop.md b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md new file mode 100644 index 00000000..a48460a9 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..43f3ca38 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestToolChoice + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/gcp/docs/models/completionchunk.md b/packages/gcp/docs/models/completionchunk.md new file mode 100644 index 00000000..7f8ab5e6 --- /dev/null +++ b/packages/gcp/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` 
| *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/completionevent.md b/packages/gcp/docs/models/completionevent.md new file mode 100644 index 00000000..7a66e8fe --- /dev/null +++ b/packages/gcp/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/completionresponsestreamchoice.md b/packages/gcp/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/gcp/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | 
:heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md new file mode 100644 index 00000000..0fece473 --- /dev/null +++ b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md @@ -0,0 +1,11 @@ +# CompletionResponseStreamChoiceFinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/gcp/docs/models/contentchunk.md b/packages/gcp/docs/models/contentchunk.md new file mode 100644 index 00000000..22023e8b --- /dev/null +++ b/packages/gcp/docs/models/contentchunk.md @@ -0,0 +1,23 @@ +# ContentChunk + + +## Supported Types + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + diff --git a/packages/gcp/docs/models/deltamessage.md b/packages/gcp/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/gcp/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | 
:heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/deltamessagecontent.md b/packages/gcp/docs/models/deltamessagecontent.md new file mode 100644 index 00000000..8142772d --- /dev/null +++ b/packages/gcp/docs/models/deltamessagecontent.md @@ -0,0 +1,17 @@ +# DeltaMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/fimcompletionrequest.md b/packages/gcp/docs/models/fimcompletionrequest.md new file mode 100644 index 00000000..fde0b625 --- /dev/null +++ b/packages/gcp/docs/models/fimcompletionrequest.md @@ -0,0 +1,18 @@ +# FIMCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/gcp/docs/models/fimcompletionrequeststop.md b/packages/gcp/docs/models/fimcompletionrequeststop.md new file mode 100644 index 00000000..a0dbb00a --- /dev/null +++ b/packages/gcp/docs/models/fimcompletionrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/fimcompletionresponse.md b/packages/gcp/docs/models/fimcompletionresponse.md new file mode 100644 index 00000000..cd62d034 --- /dev/null +++ b/packages/gcp/docs/models/fimcompletionresponse.md @@ -0,0 +1,13 @@ +# FIMCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/gcp/docs/models/fimcompletionstreamrequest.md b/packages/gcp/docs/models/fimcompletionstreamrequest.md new file mode 100644 index 00000000..ba62d854 --- /dev/null 
+++ b/packages/gcp/docs/models/fimcompletionstreamrequest.md @@ -0,0 +1,18 @@ +# FIMCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/gcp/docs/models/fimcompletionstreamrequeststop.md b/packages/gcp/docs/models/fimcompletionstreamrequeststop.md new file mode 100644 index 00000000..5a9e2ff0 --- /dev/null +++ b/packages/gcp/docs/models/fimcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionStreamRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/function.md b/packages/gcp/docs/models/function.md new file mode 100644 index 00000000..b2bdb3fe --- /dev/null +++ b/packages/gcp/docs/models/function.md @@ -0,0 +1,11 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/functioncall.md b/packages/gcp/docs/models/functioncall.md new file mode 100644 index 00000000..7ccd90dc --- /dev/null +++ 
b/packages/gcp/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/functionname.md b/packages/gcp/docs/models/functionname.md new file mode 100644 index 00000000..87d7b485 --- /dev/null +++ b/packages/gcp/docs/models/functionname.md @@ -0,0 +1,10 @@ +# FunctionName + +this restriction of `Function` is used to select a specific function to call + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/imagedetail.md b/packages/gcp/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/gcp/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurl.md b/packages/gcp/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/packages/gcp/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| 
`detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurlchunk.md b/packages/gcp/docs/models/imageurlchunk.md new file mode 100644 index 00000000..a84dac32 --- /dev/null +++ b/packages/gcp/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Literal["image_url"]* | :heavy_check_mark: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurlunion.md b/packages/gcp/docs/models/imageurlunion.md new file mode 100644 index 00000000..db97130f --- /dev/null +++ b/packages/gcp/docs/models/imageurlunion.md @@ -0,0 +1,17 @@ +# ImageURLUnion + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/gcp/docs/models/jsonschema.md b/packages/gcp/docs/models/jsonschema.md new file mode 100644 index 00000000..7ff7c070 --- /dev/null +++ b/packages/gcp/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of 
file diff --git a/packages/gcp/docs/models/loc.md b/packages/gcp/docs/models/loc.md new file mode 100644 index 00000000..d6094ac2 --- /dev/null +++ b/packages/gcp/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/packages/gcp/docs/models/mistralpromptmode.md b/packages/gcp/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..c3409d03 --- /dev/null +++ b/packages/gcp/docs/models/mistralpromptmode.md @@ -0,0 +1,12 @@ +# MistralPromptMode + +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/gcp/docs/models/prediction.md b/packages/gcp/docs/models/prediction.md new file mode 100644 index 00000000..fae3c1ca --- /dev/null +++ b/packages/gcp/docs/models/prediction.md @@ -0,0 +1,11 @@ +# Prediction + +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/referencechunk.md b/packages/gcp/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/gcp/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/responseformat.md b/packages/gcp/docs/models/responseformat.md new file mode 100644 index 00000000..5cab22f2 --- /dev/null +++ b/packages/gcp/docs/models/responseformat.md @@ -0,0 +1,11 @@ +# ResponseFormat + +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/responseformats.md b/packages/gcp/docs/models/responseformats.md new file mode 100644 index 00000000..2f5f1e55 --- /dev/null +++ b/packages/gcp/docs/models/responseformats.md @@ -0,0 +1,10 @@ +# ResponseFormats + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/gcp/docs/models/security.md b/packages/gcp/docs/models/security.md new file mode 100644 index 00000000..c698674c --- /dev/null +++ b/packages/gcp/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/systemmessage.md b/packages/gcp/docs/models/systemmessage.md new file mode 100644 index 00000000..10bda10f --- /dev/null +++ b/packages/gcp/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | 
---------------------------------------------------------------- | +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/systemmessagecontent.md b/packages/gcp/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..0c87baf3 --- /dev/null +++ b/packages/gcp/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.SystemMessageContentChunks]` + +```python +value: List[models.SystemMessageContentChunks] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/systemmessagecontentchunks.md b/packages/gcp/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/packages/gcp/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/packages/gcp/docs/models/textchunk.md b/packages/gcp/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/gcp/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/thinkchunk.md b/packages/gcp/docs/models/thinkchunk.md new file mode 100644 index 00000000..b07f598e --- /dev/null +++ b/packages/gcp/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## 
Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | \ No newline at end of file diff --git a/packages/gcp/docs/models/thinking.md b/packages/gcp/docs/models/thinking.md new file mode 100644 index 00000000..c7a0d5c9 --- /dev/null +++ b/packages/gcp/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/packages/gcp/docs/models/tool.md b/packages/gcp/docs/models/tool.md new file mode 100644 index 00000000..fb661f72 --- /dev/null +++ b/packages/gcp/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/toolcall.md b/packages/gcp/docs/models/toolcall.md new file mode 100644 index 00000000..3819236b --- /dev/null +++ 
b/packages/gcp/docs/models/toolcall.md @@ -0,0 +1,11 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/toolchoice.md b/packages/gcp/docs/models/toolchoice.md new file mode 100644 index 00000000..373046bb --- /dev/null +++ b/packages/gcp/docs/models/toolchoice.md @@ -0,0 +1,11 @@ +# ToolChoice + +ToolChoice is either a ToolChoiceEnum or a ToolChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/gcp/docs/models/toolchoiceenum.md b/packages/gcp/docs/models/toolchoiceenum.md new file mode 100644 index 00000000..0be3d6c5 --- /dev/null +++ b/packages/gcp/docs/models/toolchoiceenum.md @@ -0,0 +1,11 @@ +# ToolChoiceEnum + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | 
+| `REQUIRED` | required | \ No newline at end of file diff --git a/packages/gcp/docs/models/toolmessage.md b/packages/gcp/docs/models/toolmessage.md new file mode 100644 index 00000000..7201481e --- /dev/null +++ b/packages/gcp/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/toolmessagecontent.md b/packages/gcp/docs/models/toolmessagecontent.md new file mode 100644 index 00000000..5c76091f --- /dev/null +++ b/packages/gcp/docs/models/toolmessagecontent.md @@ -0,0 +1,17 @@ +# ToolMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/tooltypes.md b/packages/gcp/docs/models/tooltypes.md new file mode 100644 index 00000000..84e49253 --- /dev/null +++ b/packages/gcp/docs/models/tooltypes.md @@ -0,0 +1,8 @@ +# ToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/packages/gcp/docs/models/usageinfo.md b/packages/gcp/docs/models/usageinfo.md new file mode 100644 index 00000000..f5204ac9 --- /dev/null +++ b/packages/gcp/docs/models/usageinfo.md @@ -0,0 +1,12 @@ +# 
UsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/usermessage.md b/packages/gcp/docs/models/usermessage.md new file mode 100644 index 00000000..e7a932ed --- /dev/null +++ b/packages/gcp/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/usermessagecontent.md b/packages/gcp/docs/models/usermessagecontent.md new file mode 100644 index 00000000..8350f9e8 --- /dev/null +++ b/packages/gcp/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/utils/retryconfig.md b/packages/gcp/docs/models/utils/retryconfig.md new file mode 100644 index 00000000..69dd549e --- /dev/null +++ 
b/packages/gcp/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. + +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. 
| `300000` | \ No newline at end of file diff --git a/packages/gcp/docs/models/validationerror.md b/packages/gcp/docs/models/validationerror.md new file mode 100644 index 00000000..7a1654a1 --- /dev/null +++ b/packages/gcp/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/sdks/chat/README.md b/packages/gcp/docs/sdks/chat/README.md new file mode 100644 index 00000000..a1fdfd9a --- /dev/null +++ b/packages/gcp/docs/sdks/chat/README.md @@ -0,0 +1,127 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [stream](#stream) - Stream chat completion +* [complete](#complete) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## complete + +Chat Completion + +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. 
If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/gcp/docs/sdks/fim/README.md b/packages/gcp/docs/sdks/fim/README.md new file mode 100644 index 00000000..61a28883 --- /dev/null +++ b/packages/gcp/docs/sdks/fim/README.md @@ -0,0 +1,113 @@ +# Fim +(*fim*) + +## Overview + +Fill-in-the-middle API. + +### Available Operations + +* [stream](#stream) - Stream fim completion +* [complete](#complete) - Fim Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.fim.stream(prompt="def", model="codestral-2", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## complete + +FIM completion. + +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.fim.complete(prompt="def", model="codestral-2", suffix="return a+b") + +if res is not None: + # handle response + print(res.choices[0].message.content) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/gcp/docs/sdks/mistralgcp/README.md b/packages/gcp/docs/sdks/mistralgcp/README.md new file mode 100644 index 00000000..84963a9e --- /dev/null +++ b/packages/gcp/docs/sdks/mistralgcp/README.md @@ -0,0 +1,9 @@ +# MistralGCP SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + +### Available Operations + diff --git a/packages/gcp/py.typed b/packages/gcp/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/packages/gcp/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/gcp/pylintrc b/packages/gcp/pylintrc new file mode 100644 index 00000000..0391ac11 --- /dev/null +++ b/packages/gcp/pylintrc @@ -0,0 +1,664 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. 
+clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. 
+# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. 
+unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. 
If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id, + n + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. 
Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. 
+max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). 
+ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". 
+disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import, + too-many-return-statements, + redefined-builtin + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. 
When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. 
+spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object,input,dir + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). 
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/gcp/pyproject.toml b/packages/gcp/pyproject.toml new file mode 100644 index 00000000..e0c4af9a --- /dev/null +++ b/packages/gcp/pyproject.toml @@ -0,0 +1,74 @@ +[project] +name = "mistralai-gcp" +version = "2.0.0rc1" +description = "Python Client SDK for the Mistral AI API in GCP." +authors = [{ name = "Mistral" }] +requires-python = ">=3.10" +readme = "README.md" +dependencies = [ + "eval-type-backport >=0.2.0", + "google-auth (>=2.31.0,<3.0.0)", + "httpx >=0.28.1", + "pydantic >=2.11.2", + "python-dateutil >=2.8.2", + "requests (>=2.32.3,<3.0.0)", + "typing-inspection >=0.4.0", +] + +[dependency-groups] +dev = [ + "mypy==1.15.0", + "pylint==3.2.3", + "pyright>=1.1.401,<2", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", + "types-python-dateutil>=2.9.0.20240316,<3", +] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/gcp/client/py.typed"] + +[tool.hatch.build.targets.sdist] +include = ["src/mistralai"] + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai/gcp/client/py.typed" = "src/mistralai/gcp/client/py.typed" + +[tool.hatch.build.targets.wheel] +include = ["src/mistralai"] + +[tool.hatch.build.targets.wheel.sources] +"src" = "" + +[virtualenvs] +in-project = true + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = 
"src" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "google" +ignore_missing_imports = true + +[tool.pyright] +venvPath = "." +venv = ".venv" diff --git a/packages/gcp/scripts/prepare_readme.py b/packages/gcp/scripts/prepare_readme.py new file mode 100644 index 00000000..ae27b555 --- /dev/null +++ b/packages/gcp/scripts/prepare_readme.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r", encoding="utf-8") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + REPO_SUBDIR = "packages/gcp" + # Ensure the subdirectory has a trailing slash + if not REPO_SUBDIR.endswith("/"): + REPO_SUBDIR += "/" + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + REPO_SUBDIR + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w", encoding="utf-8") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as ie: + print("Failed to copy README.md to README-PYPI.md") + print(ie) diff --git a/packages/gcp/scripts/publish.sh b/packages/gcp/scripts/publish.sh new file mode 100755 index 00000000..c35748f3 --- /dev/null +++ b/packages/gcp/scripts/publish.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +uv run python scripts/prepare_readme.py + +uv build +uv publish --token $PYPI_TOKEN diff 
import json
import logging
from .types import BeforeRequestHook, BeforeRequestContext, Hooks
import httpx

logger = logging.getLogger(__name__)


# This file is only ever generated once on the first generation and then is free to be modified.
# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them
# in this file or in separate files in the hooks folder.


class GCPVertexAIPathHook(BeforeRequestHook):
    """Build full Vertex AI URL path from project_id, region, and model.

    Extracts model from request body and builds the Vertex AI URL dynamically.
    """

    def __init__(self, project_id: str, region: str):
        self.project_id = project_id
        self.region = region

    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> httpx.Request:
        """Rewrite the request path to the Vertex AI rawPredict endpoint.

        Requests without a JSON body, or whose body lacks a ``model`` field,
        are passed through unmodified.
        """
        # No body (e.g. a GET request): nothing to extract a model from.
        if not request.content:
            return request

        try:
            body = json.loads(request.content.decode("utf-8"))
        except (UnicodeDecodeError, json.JSONDecodeError):
            # Non-JSON body (e.g. multipart upload) — pass through unmodified
            return request

        model = body.get("model")
        if not model:
            logger.warning(
                "GCPVertexAIPathHook: request body has no 'model' field; "
                "Vertex AI path will not be constructed. "
                "Operation: %s",
                hook_ctx.operation_id,
            )
            return request

        # Streaming operations use the streamRawPredict specifier; detection
        # is by operation-id naming convention (e.g. "stream_chat").
        is_streaming = "stream" in hook_ctx.operation_id.lower()
        specifier = "streamRawPredict" if is_streaming else "rawPredict"

        path = (
            f"/v1/projects/{self.project_id}/locations/{self.region}/"
            f"publishers/mistralai/models/{model}:{specifier}"
        )

        # Mutate the URL in place instead of reconstructing the request:
        # rebuilding httpx.Request from (method, url, headers, content)
        # silently drops request.extensions and re-wraps the body stream,
        # whereas reassigning .url leaves everything else untouched.
        request.url = request.url.copy_with(path=path)
        return request


def init_hooks(_hooks: Hooks) -> None:
    """Initialize hooks. Called by SDKHooks.__init__.

    Note: GCPVertexAIPathHook requires project_id and region, so it is
    registered separately in MistralGCP.__init__ after those values are known.
    """
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import httpx
from .types import (
    SDKInitHook,
    BeforeRequestContext,
    BeforeRequestHook,
    AfterSuccessContext,
    AfterSuccessHook,
    AfterErrorContext,
    AfterErrorHook,
    Hooks,
)
from .registration import init_hooks
from typing import List, Optional, Tuple
from mistralai.gcp.client.httpclient import HttpClient


class SDKHooks(Hooks):
    """Concrete hook registry.

    Holds the registered hooks for each lifecycle stage and fans each event
    out to them in registration order. A hook may return an Exception to
    abort the pipeline, which is then raised to the caller.
    """

    def __init__(self) -> None:
        self.sdk_init_hooks: List[SDKInitHook] = []
        self.before_request_hooks: List[BeforeRequestHook] = []
        self.after_success_hooks: List[AfterSuccessHook] = []
        self.after_error_hooks: List[AfterErrorHook] = []
        # Register any statically-known hooks (see registration.py).
        init_hooks(self)

    def register_sdk_init_hook(self, hook: SDKInitHook) -> None:
        self.sdk_init_hooks.append(hook)

    def register_before_request_hook(self, hook: BeforeRequestHook) -> None:
        self.before_request_hooks.append(hook)

    def register_after_success_hook(self, hook: AfterSuccessHook) -> None:
        self.after_success_hooks.append(hook)

    def register_after_error_hook(self, hook: AfterErrorHook) -> None:
        self.after_error_hooks.append(hook)

    def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
        # Each hook may replace the base URL and/or the HTTP client.
        for init_hook in self.sdk_init_hooks:
            base_url, client = init_hook.sdk_init(base_url, client)
        return base_url, client

    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> httpx.Request:
        # Thread the (possibly replaced) request through every hook.
        current = request
        for req_hook in self.before_request_hooks:
            result = req_hook.before_request(hook_ctx, current)
            if isinstance(result, Exception):
                raise result
            current = result
        return current

    def after_success(
        self, hook_ctx: AfterSuccessContext, response: httpx.Response
    ) -> httpx.Response:
        current = response
        for success_hook in self.after_success_hooks:
            result = success_hook.after_success(hook_ctx, current)
            if isinstance(result, Exception):
                raise result
            current = result
        return current

    def after_error(
        self,
        hook_ctx: AfterErrorContext,
        response: Optional[httpx.Response],
        error: Optional[Exception],
    ) -> Tuple[Optional[httpx.Response], Optional[Exception]]:
        # Hooks may rewrite both the response and the error in tandem.
        for error_hook in self.after_error_hooks:
            outcome = error_hook.after_error(hook_ctx, response, error)
            if isinstance(outcome, Exception):
                raise outcome
            response, error = outcome
        return response, error
class SDKInitHook(ABC):
    """Hook invoked once at SDK construction time."""

    @abstractmethod
    def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
        """Return the (possibly replaced) base URL and HTTP client."""


class BeforeRequestHook(ABC):
    """Hook invoked before each request is sent."""

    @abstractmethod
    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> Union[httpx.Request, Exception]:
        """Return the (possibly modified) request, or an Exception to abort."""


class AfterSuccessHook(ABC):
    """Hook invoked after a non-error response is received."""

    @abstractmethod
    def after_success(
        self, hook_ctx: AfterSuccessContext, response: httpx.Response
    ) -> Union[httpx.Response, Exception]:
        """Return the (possibly modified) response, or an Exception to abort."""


class AfterErrorHook(ABC):
    """Hook invoked after a transport failure or an error response."""

    @abstractmethod
    def after_error(
        self,
        hook_ctx: AfterErrorContext,
        response: Optional[httpx.Response],
        error: Optional[Exception],
    ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]:
        """Return a (response, error) pair, or an Exception to abort."""


class Hooks(ABC):
    """Registry interface implemented by SDKHooks."""

    @abstractmethod
    def register_sdk_init_hook(self, hook: SDKInitHook):
        """Register an SDK-init hook."""

    @abstractmethod
    def register_before_request_hook(self, hook: BeforeRequestHook):
        """Register a before-request hook."""

    @abstractmethod
    def register_after_success_hook(self, hook: AfterSuccessHook):
        """Register an after-success hook."""

    @abstractmethod
    def register_after_error_hook(self, hook: AfterErrorHook):
        """Register an after-error hook."""
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .sdkconfiguration import SDKConfiguration
import httpx
from mistralai.gcp.client import errors, utils
from mistralai.gcp.client._hooks import (
    AfterErrorContext,
    AfterSuccessContext,
    BeforeRequestContext,
)
from mistralai.gcp.client.utils import (
    RetryConfig,
    SerializedRequestBody,
    get_body_content,
    run_sync_in_thread,
)
from typing import Callable, List, Mapping, Optional, Tuple
from urllib.parse import parse_qs, urlparse


class BaseSDK:
    # Shared base for the generated SDK sub-clients. Owns URL resolution,
    # httpx request construction, hook dispatch, retries, and the sync/async
    # send paths used by every operation.
    sdk_configuration: SDKConfiguration
    parent_ref: Optional[object] = None
    """
    Reference to the root SDK instance, if any. This will prevent it from
    being garbage collected while there are active streams.
    """

    def __init__(
        self,
        sdk_config: SDKConfiguration,
        parent_ref: Optional[object] = None,
    ) -> None:
        """Store the shared configuration and an optional root-SDK reference."""
        self.sdk_configuration = sdk_config
        self.parent_ref = parent_ref

    def _get_url(self, base_url, url_variables):
        """Resolve the effective server URL.

        Falls back to the configured server URL/variables for any argument
        that is None, then expands template variables into the URL.
        """
        sdk_url, sdk_variables = self.sdk_configuration.get_server_details()

        if base_url is None:
            base_url = sdk_url

        if url_variables is None:
            url_variables = sdk_variables

        return utils.template_url(base_url, url_variables)

    def _build_request_async(
        self,
        method,
        path,
        base_url,
        url_variables,
        request,
        request_body_required,
        request_has_path_params,
        request_has_query_params,
        user_agent_header,
        accept_header_value,
        _globals=None,
        security=None,
        timeout_ms: Optional[int] = None,
        get_serialized_body: Optional[
            Callable[[], Optional[SerializedRequestBody]]
        ] = None,
        url_override: Optional[str] = None,
        http_headers: Optional[Mapping[str, str]] = None,
        allow_empty_value: Optional[List[str]] = None,
    ) -> httpx.Request:
        """Build an httpx.Request using the configured *async* client."""
        client = self.sdk_configuration.async_client
        return self._build_request_with_client(
            client,
            method,
            path,
            base_url,
            url_variables,
            request,
            request_body_required,
            request_has_path_params,
            request_has_query_params,
            user_agent_header,
            accept_header_value,
            _globals,
            security,
            timeout_ms,
            get_serialized_body,
            url_override,
            http_headers,
            allow_empty_value,
        )

    def _build_request(
        self,
        method,
        path,
        base_url,
        url_variables,
        request,
        request_body_required,
        request_has_path_params,
        request_has_query_params,
        user_agent_header,
        accept_header_value,
        _globals=None,
        security=None,
        timeout_ms: Optional[int] = None,
        get_serialized_body: Optional[
            Callable[[], Optional[SerializedRequestBody]]
        ] = None,
        url_override: Optional[str] = None,
        http_headers: Optional[Mapping[str, str]] = None,
        allow_empty_value: Optional[List[str]] = None,
    ) -> httpx.Request:
        """Build an httpx.Request using the configured *sync* client."""
        client = self.sdk_configuration.client
        return self._build_request_with_client(
            client,
            method,
            path,
            base_url,
            url_variables,
            request,
            request_body_required,
            request_has_path_params,
            request_has_query_params,
            user_agent_header,
            accept_header_value,
            _globals,
            security,
            timeout_ms,
            get_serialized_body,
            url_override,
            http_headers,
            allow_empty_value,
        )

    def _build_request_with_client(
        self,
        client,
        method,
        path,
        base_url,
        url_variables,
        request,
        request_body_required,
        request_has_path_params,
        request_has_query_params,
        user_agent_header,
        accept_header_value,
        _globals=None,
        security=None,
        timeout_ms: Optional[int] = None,
        get_serialized_body: Optional[
            Callable[[], Optional[SerializedRequestBody]]
        ] = None,
        url_override: Optional[str] = None,
        http_headers: Optional[Mapping[str, str]] = None,
        allow_empty_value: Optional[List[str]] = None,
    ) -> httpx.Request:
        """Assemble the full httpx.Request: URL, query params, headers, body.

        :param client: httpx client whose build_request is used at the end.
        :param request: operation request model; source of path/query params
            and headers when the corresponding ``request_has_*`` flags are set.
        :param get_serialized_body: lazy producer of the serialized body;
            raises ValueError if ``request_body_required`` and it yields None.
        :param url_override: full URL that bypasses path/query generation
            (its own query string is preserved).
        """
        query_params = {}

        url = url_override
        if url is None:
            # Normal path: expand path params into the resolved server URL
            # and collect query params from the request model/globals.
            url = utils.generate_url(
                self._get_url(base_url, url_variables),
                path,
                request if request_has_path_params else None,
                _globals if request_has_path_params else None,
            )

            query_params = utils.get_query_params(
                request if request_has_query_params else None,
                _globals if request_has_query_params else None,
                allow_empty_value,
            )
        else:
            # Pick up the query parameter from the override so they can be
            # preserved when building the request later on (necessary as of
            # httpx 0.28).
            parsed_override = urlparse(str(url_override))
            query_params = parse_qs(parsed_override.query, keep_blank_values=True)

        headers = utils.get_headers(request, _globals)
        headers["Accept"] = accept_header_value
        headers[user_agent_header] = self.sdk_configuration.user_agent

        if security is not None:
            # Security may be provided lazily as a callable.
            if callable(security):
                security = security()

        if security is not None:
            # Security headers/query params take precedence on key collisions.
            security_headers, security_query_params = utils.get_security(security)
            headers = {**headers, **security_headers}
            query_params = {**query_params, **security_query_params}

        serialized_request_body = SerializedRequestBody()
        if get_serialized_body is not None:
            rb = get_serialized_body()
            if request_body_required and rb is None:
                raise ValueError("request body is required")

            if rb is not None:
                serialized_request_body = rb

        # Multipart content types are left to httpx so it can set the
        # boundary parameter itself.
        if (
            serialized_request_body.media_type is not None
            and serialized_request_body.media_type
            not in (
                "multipart/form-data",
                "multipart/mixed",
            )
        ):
            headers["content-type"] = serialized_request_body.media_type

        # Caller-supplied headers override everything set above.
        if http_headers is not None:
            for header, value in http_headers.items():
                headers[header] = value

        # timeout_ms is milliseconds; httpx expects seconds.
        timeout = timeout_ms / 1000 if timeout_ms is not None else None

        return client.build_request(
            method,
            url,
            params=query_params,
            content=serialized_request_body.content,
            data=serialized_request_body.data,
            files=serialized_request_body.files,
            headers=headers,
            timeout=timeout,
        )

    def do_request(
        self,
        hook_ctx,
        request,
        error_status_codes,
        stream=False,
        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
    ) -> httpx.Response:
        """Send a request synchronously with hook dispatch and retries.

        Runs before-request hooks, sends via the sync client, routes
        transport failures and error-status responses through after-error
        hooks (raising unless a hook substitutes a response), and finally
        runs after-success hooks on non-error responses.

        :param error_status_codes: status-code patterns (e.g. "4XX") treated
            as errors.
        :param retry_config: optional (RetryConfig, retryable-status-codes)
            pair; when set, the whole send is wrapped in utils.retry.
        """
        client = self.sdk_configuration.client
        logger = self.sdk_configuration.debug_logger

        # NOTE(review): hooks are fetched via __dict__ rather than attribute
        # access — presumably to bypass model/dataclass attribute machinery
        # on SDKConfiguration; confirm before changing.
        hooks = self.sdk_configuration.__dict__["_hooks"]

        def do():
            # One attempt: before-request hooks, send, error routing.
            http_res = None
            try:
                req = hooks.before_request(BeforeRequestContext(hook_ctx), request)
                logger.debug(
                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
                    req.method,
                    req.url,
                    req.headers,
                    get_body_content(req),
                )

                if client is None:
                    raise ValueError("client is required")

                http_res = client.send(req, stream=stream)
            except Exception as e:
                # Give after-error hooks a chance to swallow/translate the
                # transport exception before it propagates.
                _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e)
                if e is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise e

            if http_res is None:
                logger.debug("Raising no response SDK error")
                raise errors.NoResponseError("No response received")

            logger.debug(
                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
                http_res.status_code,
                http_res.url,
                http_res.headers,
                # Body is not read here when streaming, to avoid consuming it.
                "" if stream else http_res.text,
            )

            if utils.match_status_codes(error_status_codes, http_res.status_code):
                # Error status: after-error hooks may substitute a response;
                # otherwise surface the hook's error or a generic SDKError.
                result, err = hooks.after_error(
                    AfterErrorContext(hook_ctx), http_res, None
                )
                if err is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise err
                if result is not None:
                    http_res = result
                else:
                    logger.debug("Raising unexpected SDK error")
                    raise errors.SDKError("Unexpected error occurred", http_res)

            return http_res

        if retry_config is not None:
            http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1]))
        else:
            http_res = do()

        if not utils.match_status_codes(error_status_codes, http_res.status_code):
            http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res)

        return http_res

    async def do_request_async(
        self,
        hook_ctx,
        request,
        error_status_codes,
        stream=False,
        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
    ) -> httpx.Response:
        """Async counterpart of do_request.

        Same pipeline as do_request, but sends via the async client and runs
        the (synchronous) hook callbacks in a worker thread via
        run_sync_in_thread so they cannot block the event loop.
        """
        client = self.sdk_configuration.async_client
        logger = self.sdk_configuration.debug_logger

        # See the NOTE in do_request about __dict__ access.
        hooks = self.sdk_configuration.__dict__["_hooks"]

        async def do():
            http_res = None
            try:
                # Hooks are sync code — off-load to a thread.
                req = await run_sync_in_thread(
                    hooks.before_request, BeforeRequestContext(hook_ctx), request
                )

                logger.debug(
                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
                    req.method,
                    req.url,
                    req.headers,
                    get_body_content(req),
                )

                if client is None:
                    raise ValueError("client is required")

                http_res = await client.send(req, stream=stream)
            except Exception as e:
                _, e = await run_sync_in_thread(
                    hooks.after_error, AfterErrorContext(hook_ctx), None, e
                )

                if e is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise e

            if http_res is None:
                logger.debug("Raising no response SDK error")
                raise errors.NoResponseError("No response received")

            logger.debug(
                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
                http_res.status_code,
                http_res.url,
                http_res.headers,
                "" if stream else http_res.text,
            )

            if utils.match_status_codes(error_status_codes, http_res.status_code):
                result, err = await run_sync_in_thread(
                    hooks.after_error, AfterErrorContext(hook_ctx), http_res, None
                )

                if err is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise err
                if result is not None:
                    http_res = result
                else:
                    logger.debug("Raising unexpected SDK error")
                    raise errors.SDKError("Unexpected error occurred", http_res)

            return http_res

        if retry_config is not None:
            http_res = await utils.retry_async(
                do, utils.Retries(retry_config[0], retry_config[1])
            )
        else:
            http_res = await do()

        if not utils.match_status_codes(error_status_codes, http_res.status_code):
            http_res = await run_sync_in_thread(
                hooks.after_success, AfterSuccessContext(hook_ctx), http_res
            )

        return http_res
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.gcp.client import errors, models, utils +from mistralai.gcp.client._hooks import HookContext +from mistralai.gcp.client.types import OptionalNullable, UNSET +from mistralai.gcp.client.utils import eventstreaming +from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + def stream( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream chat 
completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. 
By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + ) + + req = self._build_request( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: 
utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = 
UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
+ :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + ) + + req = self._build_request_async( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( 
    def complete(
        self,
        *,
        model: str,
        messages: Union[
            List[models.ChatCompletionRequestMessage],
            List[models.ChatCompletionRequestMessageTypedDict],
        ],
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = None,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models.ChatCompletionRequestStop,
                models.ChatCompletionRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        response_format: Optional[
            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
        ] = None,
        tools: OptionalNullable[
            Union[List[models.Tool], List[models.ToolTypedDict]]
        ] = UNSET,
        tool_choice: Optional[
            Union[
                models.ChatCompletionRequestToolChoice,
                models.ChatCompletionRequestToolChoiceTypedDict,
            ]
        ] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        n: OptionalNullable[int] = UNSET,
        prediction: Optional[
            Union[models.Prediction, models.PredictionTypedDict]
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ChatCompletionResponse:
        r"""Chat Completion

        Synchronous, non-streaming chat completion: POSTs the request to
        ``/rawPredict`` and returns the fully materialized response.

        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
        :param n: Number of completions to return for each request, input tokens are only billed once.
        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout wins; otherwise fall back to the SDK-wide setting.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Build the typed request model; TypedDict inputs are coerced into
        # their pydantic counterparts via utils.get_pydantic_model before
        # serialization.
        request = models.ChatCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            messages=utils.get_pydantic_model(
                messages, List[models.ChatCompletionRequestMessage]
            ),
            response_format=utils.get_pydantic_model(
                response_format, Optional[models.ResponseFormat]
            ),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.ChatCompletionRequestToolChoice]
            ),
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            n=n,
            prediction=utils.get_pydantic_model(
                prediction, Optional[models.Prediction]
            ),
            parallel_tool_calls=parallel_tool_calls,
            prompt_mode=prompt_mode,
        )

        req = self._build_request(
            method="POST",
            path="/rawPredict",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialization is deferred so hooks can inspect/alter the request
            # before the body is produced.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Fall back to the SDK-level retry configuration when none is given.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_completion_v1_chat_completions_post",
                oauth2_scopes=None,
                security_source=self.sdk_configuration.security,
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Map the HTTP response onto the typed result or a matching error.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ChatCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            # 422 carries structured validation details.
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
    async def complete_async(
        self,
        *,
        model: str,
        messages: Union[
            List[models.ChatCompletionRequestMessage],
            List[models.ChatCompletionRequestMessageTypedDict],
        ],
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = None,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models.ChatCompletionRequestStop,
                models.ChatCompletionRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        response_format: Optional[
            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
        ] = None,
        tools: OptionalNullable[
            Union[List[models.Tool], List[models.ToolTypedDict]]
        ] = UNSET,
        tool_choice: Optional[
            Union[
                models.ChatCompletionRequestToolChoice,
                models.ChatCompletionRequestToolChoiceTypedDict,
            ]
        ] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        n: OptionalNullable[int] = UNSET,
        prediction: Optional[
            Union[models.Prediction, models.PredictionTypedDict]
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ChatCompletionResponse:
        r"""Chat Completion

        Async counterpart of ``complete``: POSTs the request to
        ``/rawPredict`` and returns the fully materialized response.

        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
        :param n: Number of completions to return for each request, input tokens are only billed once.
        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout wins; otherwise fall back to the SDK-wide setting.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Build the typed request model; TypedDict inputs are coerced into
        # their pydantic counterparts via utils.get_pydantic_model before
        # serialization.
        request = models.ChatCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            messages=utils.get_pydantic_model(
                messages, List[models.ChatCompletionRequestMessage]
            ),
            response_format=utils.get_pydantic_model(
                response_format, Optional[models.ResponseFormat]
            ),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.ChatCompletionRequestToolChoice]
            ),
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            n=n,
            prediction=utils.get_pydantic_model(
                prediction, Optional[models.Prediction]
            ),
            parallel_tool_calls=parallel_tool_calls,
            prompt_mode=prompt_mode,
        )

        req = self._build_request_async(
            method="POST",
            path="/rawPredict",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialization is deferred so hooks can inspect/alter the request
            # before the body is produced.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Fall back to the SDK-level retry configuration when none is given.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_completion_v1_chat_completions_post",
                oauth2_scopes=None,
                security_source=self.sdk_configuration.security,
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Map the HTTP response onto the typed result or a matching error.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ChatCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            # 422 carries structured validation details.
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

# MistralGCPError is imported eagerly: it is the base class of the other
# error types. Everything else is resolved lazily on first attribute access.
from .mistralgcperror import MistralGCPError
from typing import Any, TYPE_CHECKING

from mistralai.gcp.client.utils.dynamic_imports import lazy_getattr, lazy_dir

if TYPE_CHECKING:
    # Visible only to static type checkers; at runtime these names resolve
    # through the module-level __getattr__ below.
    from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
    from .no_response_error import NoResponseError
    from .responsevalidationerror import ResponseValidationError
    from .sdkerror import SDKError

__all__ = [
    "HTTPValidationError",
    "HTTPValidationErrorData",
    "MistralGCPError",
    "NoResponseError",
    "ResponseValidationError",
    "SDKError",
]

# Maps each lazily exported name to the relative submodule that defines it.
_dynamic_imports: dict[str, str] = {
    "HTTPValidationError": ".httpvalidationerror",
    "HTTPValidationErrorData": ".httpvalidationerror",
    "NoResponseError": ".no_response_error",
    "ResponseValidationError": ".responsevalidationerror",
    "SDKError": ".sdkerror",
}


def __getattr__(attr_name: str) -> Any:
    # PEP 562 module-level __getattr__: import the owning submodule on demand.
    return lazy_getattr(
        attr_name, package=__package__, dynamic_imports=_dynamic_imports
    )


def __dir__():
    # Make dir() also report the lazily loaded names.
    return lazy_dir(dynamic_imports=_dynamic_imports)
DO NOT EDIT.""" + +from __future__ import annotations +from dataclasses import dataclass, field +import httpx +from mistralai.gcp.client.errors import MistralGCPError +from mistralai.gcp.client.models import validationerror as models_validationerror +from mistralai.gcp.client.types import BaseModel +from typing import List, Optional + + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[models_validationerror.ValidationError]] = None + + +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralGCPError): + data: HTTPValidationErrorData = field(hash=False) + + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py b/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py new file mode 100644 index 00000000..9de91bf2 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import httpx
from typing import Optional
from dataclasses import dataclass, field


@dataclass(unsafe_hash=True)
class MistralGCPError(Exception):
    """The base class for all HTTP error responses."""

    message: str
    status_code: int
    body: str
    headers: httpx.Headers = field(hash=False)
    raw_response: httpx.Response = field(hash=False)

    def __init__(
        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
    ):
        # Use the response text as the body only when no body was provided.
        resolved_body = raw_response.text if body is None else body
        # Populate all fields through object.__setattr__, following the
        # convention used by the other generated error dataclasses.
        for attr_name, attr_value in (
            ("message", message),
            ("status_code", raw_response.status_code),
            ("body", resolved_body),
            ("headers", raw_response.headers),
            ("raw_response", raw_response),
        ):
            object.__setattr__(self, attr_name, attr_value)

    def __str__(self):
        # The human-readable form is just the stored message.
        return self.message
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from dataclasses import dataclass


@dataclass(unsafe_hash=True)
class NoResponseError(Exception):
    """Error raised when no HTTP response is received from the server."""

    message: str

    def __init__(self, message: str = "No response received"):
        # Store on the dataclass field and hand the message to Exception so
        # args/str() behave conventionally.
        object.__setattr__(self, "message", message)
        super().__init__(message)

    def __str__(self):
        return self.message


# --- responsevalidationerror.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import httpx
from typing import Optional
from dataclasses import dataclass

from mistralai.gcp.client.errors import MistralGCPError


@dataclass(unsafe_hash=True)
class ResponseValidationError(MistralGCPError):
    """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""

    def __init__(
        self,
        message: str,
        raw_response: httpx.Response,
        cause: Exception,
        body: Optional[str] = None,
    ):
        # Fold the underlying cause into the message before delegating to the
        # shared base-class constructor.
        combined_message = f"{message}: {cause}"
        super().__init__(combined_message, raw_response, body)

    @property
    def cause(self):
        """Normally the Pydantic ValidationError"""
        return self.__cause__
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import httpx
from typing import Optional
from dataclasses import dataclass

from mistralai.gcp.client.errors import MistralGCPError

# Upper bound on how much of the response body is echoed into the message.
MAX_MESSAGE_LEN = 10_000


@dataclass(unsafe_hash=True)
class SDKError(MistralGCPError):
    """The fallback error class if no more specific error class is matched."""

    def __init__(
        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
    ):
        # Caller-provided prefix, followed by the HTTP status.
        prefix = f"{message}: " if message else ""
        detail = f"Status {raw_response.status_code}"

        # Mention the content type unless it is plain application/json;
        # quote it when it contains a space so the message stays parseable.
        content_type = raw_response.headers.get("content-type", '""')
        if content_type != "application/json":
            if " " in content_type:
                content_type = f'"{content_type}"'
            detail += f" Content-Type {content_type}"

        # Choose what to display as the body, truncating oversized payloads.
        shown_body = body or raw_response.text or '""'
        if len(shown_body) > MAX_MESSAGE_LEN:
            overflow = len(shown_body) - MAX_MESSAGE_LEN
            shown_body = f"{shown_body[:MAX_MESSAGE_LEN]}...and {overflow} more chars"

        full_message = f"{prefix}{detail}. Body: {shown_body}".strip()

        super().__init__(full_message, raw_response, body)
from .basesdk import BaseSDK
from mistralai.gcp.client import errors, models, utils
from mistralai.gcp.client._hooks import HookContext
from mistralai.gcp.client.types import OptionalNullable, UNSET
from mistralai.gcp.client.utils import eventstreaming
from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response
from typing import Any, Dict, Mapping, Optional, Union


class Fim(BaseSDK):
    r"""Fill-in-the-middle API."""

    def stream(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[
            Union[
                models.FIMCompletionStreamRequestStop,
                models.FIMCompletionStreamRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.CompletionEvent]:
        r"""Stream fim completion

        Tokens are sent as data-only server-sent events as they become
        available; the stream is terminated by a ``data: [DONE]`` message.

        :param model: ID of the model with FIM to use.
        :param prompt: The text/code to complete.
        :param temperature: Sampling temperature (recommended 0.0-0.7); higher is more random, lower more deterministic. Alter this or `top_p` but not both.
        :param top_p: Nucleus sampling probability mass. Alter this or `temperature` but not both.
        :param max_tokens: Maximum number of tokens to generate; prompt tokens plus `max_tokens` cannot exceed the model's context length.
        :param stream:
        :param stop: Stop generation if this token (or any token of an array) is detected.
        :param random_seed: Seed for random sampling; fixed seeds give deterministic results.
        :param metadata:
        :param suffix: Optional trailing context; the model fills between `prompt` and `suffix`.
        :param min_tokens: Minimum number of tokens to generate.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds) for this method.
        :param http_headers: Additional headers to set or replace on requests.
        """
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url wins over the configured default server.
        url_variables = None
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(None, url_variables)

        fim_request = models.FIMCompletionStreamRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            prompt=prompt,
            suffix=suffix,
            min_tokens=min_tokens,
        )

        built_request = self._build_request(
            method="POST",
            path="/streamRawPredict#fim",
            base_url=base_url,
            url_variables=url_variables,
            request=fim_request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                fim_request, False, False, "json", models.FIMCompletionStreamRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Fall back to the SDK-wide retry policy when none was supplied.
        if retries == UNSET and self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config
        retry_config = (
            (retries, ["429", "500", "502", "503", "504"])
            if isinstance(retries, utils.RetryConfig)
            else None
        )

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="stream_fim",
                oauth2_scopes=None,
                security_source=self.sdk_configuration.security,
            ),
            request=built_request,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "text/event-stream"):
            # Decode each SSE event lazily into a CompletionEvent.
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                sentinel="[DONE]",
                client_ref=self,
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = utils.stream_to_text(http_res)
            validation_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res, http_res_text
            )
            raise errors.HTTPValidationError(validation_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, utils.stream_to_text(http_res)
            )
        if utils.match_response(http_res, "5XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, utils.stream_to_text(http_res)
            )

        raise errors.SDKError(
            "Unexpected response received", http_res, utils.stream_to_text(http_res)
        )

    async def stream_async(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[
            Union[
                models.FIMCompletionStreamRequestStop,
                models.FIMCompletionStreamRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
        r"""Stream fim completion

        Async variant of :meth:`stream`. Tokens are sent as data-only
        server-sent events as they become available; the stream is terminated
        by a ``data: [DONE]`` message.

        :param model: ID of the model with FIM to use.
        :param prompt: The text/code to complete.
        :param temperature: Sampling temperature (recommended 0.0-0.7); higher is more random, lower more deterministic. Alter this or `top_p` but not both.
        :param top_p: Nucleus sampling probability mass. Alter this or `temperature` but not both.
        :param max_tokens: Maximum number of tokens to generate; prompt tokens plus `max_tokens` cannot exceed the model's context length.
        :param stream:
        :param stop: Stop generation if this token (or any token of an array) is detected.
        :param random_seed: Seed for random sampling; fixed seeds give deterministic results.
        :param metadata:
        :param suffix: Optional trailing context; the model fills between `prompt` and `suffix`.
        :param min_tokens: Minimum number of tokens to generate.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds) for this method.
        :param http_headers: Additional headers to set or replace on requests.
        """
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url wins over the configured default server.
        url_variables = None
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(None, url_variables)

        fim_request = models.FIMCompletionStreamRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            prompt=prompt,
            suffix=suffix,
            min_tokens=min_tokens,
        )

        built_request = self._build_request_async(
            method="POST",
            path="/streamRawPredict#fim",
            base_url=base_url,
            url_variables=url_variables,
            request=fim_request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                fim_request, False, False, "json", models.FIMCompletionStreamRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Fall back to the SDK-wide retry policy when none was supplied.
        if retries == UNSET and self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config
        retry_config = (
            (retries, ["429", "500", "502", "503", "504"])
            if isinstance(retries, utils.RetryConfig)
            else None
        )

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="stream_fim",
                oauth2_scopes=None,
                security_source=self.sdk_configuration.security,
            ),
            request=built_request,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "text/event-stream"):
            # Decode each SSE event lazily into a CompletionEvent.
            return eventstreaming.EventStreamAsync(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                sentinel="[DONE]",
                client_ref=self,
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = await utils.stream_to_text_async(http_res)
            validation_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res, http_res_text
            )
            raise errors.HTTPValidationError(validation_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, await utils.stream_to_text_async(http_res)
            )
        if utils.match_response(http_res, "5XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, await utils.stream_to_text_async(http_res)
            )

        raise errors.SDKError(
            "Unexpected response received",
            http_res,
            await utils.stream_to_text_async(http_res),
        )

    def complete(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models.FIMCompletionRequestStop,
                models.FIMCompletionRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.FIMCompletionResponse:
        r"""Fim Completion

        FIM completion.

        :param model: ID of the model with FIM to use.
        :param prompt: The text/code to complete.
        :param temperature: Sampling temperature (recommended 0.0-0.7); higher is more random, lower more deterministic. Alter this or `top_p` but not both.
        :param top_p: Nucleus sampling probability mass. Alter this or `temperature` but not both.
        :param max_tokens: Maximum number of tokens to generate; prompt tokens plus `max_tokens` cannot exceed the model's context length.
        :param stream: Whether to stream back partial progress as server-sent events terminated by ``data: [DONE]``; otherwise the full result is returned as JSON.
        :param stop: Stop generation if this token (or any token of an array) is detected.
        :param random_seed: Seed for random sampling; fixed seeds give deterministic results.
        :param metadata:
        :param suffix: Optional trailing context; the model fills between `prompt` and `suffix`.
        :param min_tokens: Minimum number of tokens to generate.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds) for this method.
        :param http_headers: Additional headers to set or replace on requests.
        """
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url wins over the configured default server.
        url_variables = None
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(None, url_variables)

        fim_request = models.FIMCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            prompt=prompt,
            suffix=suffix,
            min_tokens=min_tokens,
        )

        built_request = self._build_request(
            method="POST",
            path="/rawPredict#fim",
            base_url=base_url,
            url_variables=url_variables,
            request=fim_request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                fim_request, False, False, "json", models.FIMCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Fall back to the SDK-wide retry policy when none was supplied.
        if retries == UNSET and self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config
        retry_config = (
            (retries, ["429", "500", "502", "503", "504"])
            if isinstance(retries, utils.RetryConfig)
            else None
        )

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="fim_completion_v1_fim_completions_post",
                oauth2_scopes=None,
                security_source=self.sdk_configuration.security,
            ),
            request=built_request,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            validation_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(validation_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, utils.stream_to_text(http_res)
            )
        if utils.match_response(http_res, "5XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, utils.stream_to_text(http_res)
            )

        raise errors.SDKError("Unexpected response received", http_res)

    async def complete_async(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models.FIMCompletionRequestStop,
                models.FIMCompletionRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.FIMCompletionResponse:
        r"""Fim Completion

        Async variant of :meth:`complete`. FIM completion.

        :param model: ID of the model with FIM to use.
        :param prompt: The text/code to complete.
        :param temperature: Sampling temperature (recommended 0.0-0.7); higher is more random, lower more deterministic. Alter this or `top_p` but not both.
        :param top_p: Nucleus sampling probability mass. Alter this or `temperature` but not both.
        :param max_tokens: Maximum number of tokens to generate; prompt tokens plus `max_tokens` cannot exceed the model's context length.
        :param stream: Whether to stream back partial progress as server-sent events terminated by ``data: [DONE]``; otherwise the full result is returned as JSON.
        :param stop: Stop generation if this token (or any token of an array) is detected.
        :param random_seed: Seed for random sampling; fixed seeds give deterministic results.
        :param metadata:
        :param suffix: Optional trailing context; the model fills between `prompt` and `suffix`.
        :param min_tokens: Minimum number of tokens to generate.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds) for this method.
        :param http_headers: Additional headers to set or replace on requests.
        """
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url wins over the configured default server.
        url_variables = None
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(None, url_variables)

        fim_request = models.FIMCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            prompt=prompt,
            suffix=suffix,
            min_tokens=min_tokens,
        )

        built_request = self._build_request_async(
            method="POST",
            path="/rawPredict#fim",
            base_url=base_url,
            url_variables=url_variables,
            request=fim_request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                fim_request, False, False, "json", models.FIMCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Fall back to the SDK-wide retry policy when none was supplied.
        if retries == UNSET and self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config
        retry_config = (
            (retries, ["429", "500", "502", "503", "504"])
            if isinstance(retries, utils.RetryConfig)
            else None
        )

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="fim_completion_v1_fim_completions_post",
                oauth2_scopes=None,
                security_source=self.sdk_configuration.security,
            ),
            request=built_request,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            validation_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(validation_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, await utils.stream_to_text_async(http_res)
            )
        if utils.match_response(http_res, "5XX", "*"):
            raise errors.SDKError(
                "API error occurred", http_res, await utils.stream_to_text_async(http_res)
            )

        raise errors.SDKError("Unexpected response received", http_res)
# pyright: reportReturnType = false
import asyncio
from typing_extensions import Protocol, runtime_checkable
import httpx
from typing import Any, Optional, Union


@runtime_checkable
class HttpClient(Protocol):
    """Structural interface for a synchronous httpx-compatible client."""

    def send(
        self,
        request: httpx.Request,
        *,
        stream: bool = False,
        auth: Union[
            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
        ] = httpx.USE_CLIENT_DEFAULT,
        follow_redirects: Union[
            bool, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
    ) -> httpx.Response:
        pass

    def build_request(
        self,
        method: str,
        url: httpx._types.URLTypes,
        *,
        content: Optional[httpx._types.RequestContent] = None,
        data: Optional[httpx._types.RequestData] = None,
        files: Optional[httpx._types.RequestFiles] = None,
        json: Optional[Any] = None,
        params: Optional[httpx._types.QueryParamTypes] = None,
        headers: Optional[httpx._types.HeaderTypes] = None,
        cookies: Optional[httpx._types.CookieTypes] = None,
        timeout: Union[
            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
        extensions: Optional[httpx._types.RequestExtensions] = None,
    ) -> httpx.Request:
        pass

    def close(self) -> None:
        pass


@runtime_checkable
class AsyncHttpClient(Protocol):
    """Structural interface for an asynchronous httpx-compatible client."""

    async def send(
        self,
        request: httpx.Request,
        *,
        stream: bool = False,
        auth: Union[
            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
        ] = httpx.USE_CLIENT_DEFAULT,
        follow_redirects: Union[
            bool, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
    ) -> httpx.Response:
        pass

    def build_request(
        self,
        method: str,
        url: httpx._types.URLTypes,
        *,
        content: Optional[httpx._types.RequestContent] = None,
        data: Optional[httpx._types.RequestData] = None,
        files: Optional[httpx._types.RequestFiles] = None,
        json: Optional[Any] = None,
        params: Optional[httpx._types.QueryParamTypes] = None,
        headers: Optional[httpx._types.HeaderTypes] = None,
        cookies: Optional[httpx._types.CookieTypes] = None,
        timeout: Union[
            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
        extensions: Optional[httpx._types.RequestExtensions] = None,
    ) -> httpx.Request:
        pass

    async def aclose(self) -> None:
        pass


class ClientOwner(Protocol):
    """Anything that holds references to the SDK's sync/async clients."""

    client: Union[HttpClient, None]
    async_client: Union[AsyncHttpClient, None]


def close_clients(
    owner: ClientOwner,
    sync_client: Union[HttpClient, None],
    sync_client_supplied: bool,
    async_client: Union[AsyncHttpClient, None],
    async_client_supplied: bool,
) -> None:
    """
    A finalizer function that is meant to be used with weakref.finalize to close
    httpx clients used by an SDK so that underlying resources can be garbage
    collected.
    """

    # Unset the owner's references first so nothing keeps the clients alive
    # and they can be reaped after this finalizer runs.
    owner.client = None
    owner.async_client = None

    # Only close clients the SDK created itself; caller-supplied clients are
    # the caller's responsibility.
    if sync_client is not None and not sync_client_supplied:
        try:
            sync_client.close()
        except Exception:
            # Finalizers must never raise; closing is best effort.
            pass

    if async_client is None or async_client_supplied:
        return

    try:
        # If a loop is still running, schedule the close on it from this
        # (finalizer) thread.
        running_loop = asyncio.get_running_loop()
        asyncio.run_coroutine_threadsafe(async_client.aclose(), running_loop)
    except RuntimeError:
        # No running loop here: drive the coroutine ourselves.
        try:
            asyncio.run(async_client.aclose())
        except RuntimeError:
            # best effort
            pass
ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk + from .deltamessage import ( + DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, + DeltaMessageTypedDict, + ) + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .imagedetail import ImageDetail + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .security import Security, SecurityTypedDict + from .systemmessage import ( + SystemMessage, + 
SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + 
"CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "ContentChunk", + "ContentChunkTypedDict", + "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", + "DeltaMessageTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "ImageDetail", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "Loc", + "LocTypedDict", + "MistralPromptMode", + "Prediction", + "PredictionTypedDict", + "ReferenceChunk", + "ReferenceChunkTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Security", + "SecurityTypedDict", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "UnknownContentChunk", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageTypedDict", + "ValidationError", + 
"ValidationErrorTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": 
".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", + "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "ImageDetail": ".imagedetail", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + 
"Security": ".security", + "SecurityTypedDict": ".security", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py new file mode 100644 index 00000000..702ac470 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) + + +class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + + +class AssistantMessage(BaseModel): + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + content: OptionalNullable[AssistantMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py new file mode 100644 index 00000000..ae5a2fbf --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.gcp.client.types import BaseModel, UnrecognizedStr +from typing import Literal, Union +from typing_extensions import TypedDict + + +ChatCompletionChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + message: AssistantMessageTypedDict + finish_reason: ChatCompletionChoiceFinishReason + + +class ChatCompletionChoice(BaseModel): + index: int + + message: AssistantMessage + + finish_reason: ChatCompletionChoiceFinishReason diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py new file mode 100644 index 00000000..8229c5bb --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py @@ -0,0 +1,218 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class ChatCompletionRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py new file mode 100644 index 00000000..317c4d84 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.gcp.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class ChatCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..3c228d2e --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionStreamRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionStreamRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py new file mode 100644 index 00000000..a0b1ae2f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + + model: str + + choices: List[CompletionResponseStreamChoice] + + object: Optional[str] = None + + created: Optional[int] = None + + usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionevent.py b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py new file mode 100644 index 00000000..bb155009 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.gcp.client.types import BaseModel +from typing_extensions import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..e58d4c88 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + UNSET_SENTINEL, + UnrecognizedStr, +) +from pydantic import model_serializer +from typing import Literal, Union +from typing_extensions import TypedDict + + +CompletionResponseStreamChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + + delta: DeltaMessage + + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py 
b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py new file mode 100644 index 00000000..18d48150 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from functools import partial +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], +) + + +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, +} + + +ContentChunk = Annotated[ + Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py new file mode 100644 index 00000000..63e6a7f3 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + + +class DeltaMessage(BaseModel): + role: OptionalNullable[str] = UNSET + + content: OptionalNullable[DeltaMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] 
= UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py new file mode 100644 index 00000000..e460f76c --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py @@ -0,0 +1,129 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py new file mode 100644 index 00000000..5b80da3f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.gcp.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class FIMCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..fffc3054 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py @@ -0,0 +1,127 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/function.py b/packages/gcp/src/mistralai/gcp/client/models/function.py new file mode 100644 index 00000000..439e8313 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/function.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + strict: NotRequired[bool] + + +class Function(BaseModel): + name: str + + parameters: Dict[str, Any] + + description: Optional[str] = None + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/functioncall.py b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py new file mode 100644 index 00000000..0f1b2425 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) + + +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + + arguments: Arguments diff --git a/packages/gcp/src/mistralai/gcp/client/models/functionname.py b/packages/gcp/src/mistralai/gcp/client/models/functionname.py new file mode 100644 index 00000000..585b9e39 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from typing_extensions import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py new file mode 100644 index 00000000..68ed7608 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurl.py b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py new file mode 100644 index 00000000..903d0a1a --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imagedetail import ImageDetail +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[ImageDetail]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[ImageDetail] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py new file mode 100644 index 00000000..4bec0eec --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + type: Annotated[ + Annotated[Literal["image_url"], AfterValidator(validate_const("image_url"))], + pydantic.Field(alias="type"), + ] = "image_url" + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py new file mode 100644 index 00000000..684ac09f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py new file mode 100644 index 00000000..c765e4f1 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. +""" diff --git a/packages/gcp/src/mistralai/gcp/client/models/prediction.py b/packages/gcp/src/mistralai/gcp/client/models/prediction.py new file mode 100644 index 00000000..2e325289 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/prediction.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = 
{} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py new file mode 100644 index 00000000..261c4755 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/responseformat.py b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py new file mode 100644 index 00000000..f3aa9930 --- /dev/null +++ 
b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .responseformats import ResponseFormats +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: NotRequired[ResponseFormats] + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] + + +class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: Optional[ResponseFormats] = None + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/responseformats.py b/packages/gcp/src/mistralai/gcp/client/models/responseformats.py new file mode 100644 index 00000000..cbf83ce7 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/responseformats.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/security.py b/packages/gcp/src/mistralai/gcp/client/models/security.py new file mode 100644 index 00000000..10a469b5 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/security.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import FieldMetadata, SecurityMetadata +from typing_extensions import Annotated, TypedDict + + +class SecurityTypedDict(TypedDict): + api_key: str + + +class Security(BaseModel): + api_key: Annotated[ + str, + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py new file mode 100644 index 00000000..b3795c4b --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], +) + + +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] +) + + +class SystemMessageTypedDict(TypedDict): + content: SystemMessageContentTypedDict + role: Literal["system"] + + +class SystemMessage(BaseModel): + content: SystemMessageContent + + role: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff 
--git a/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..8de71c90 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from pydantic import Field +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[TextChunk, ThinkChunk], Field(discriminator="type") +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py new file mode 100644 index 00000000..69032272 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + type: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py new file mode 100644 index 00000000..33ec8394 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/tool.py b/packages/gcp/src/mistralai/gcp/client/models/tool.py new file mode 100644 index 00000000..670aa81f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/tool.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from .tooltypes import ToolTypes +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[ToolTypes] + + +class Tool(BaseModel): + function: Function + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolcall.py b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py new file mode 100644 index 00000000..3ea8e283 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from .tooltypes import ToolTypes +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + type: NotRequired[ToolTypes] + index: NotRequired[int] + + +class ToolCall(BaseModel): + function: FunctionCall + + id: Optional[str] = "null" + + type: Optional[ToolTypes] = None + + index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py new file mode 100644 index 00000000..6e795fd7 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py new file mode 100644 index 00000000..01f6f677 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py new file mode 100644 index 00000000..ce160391 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) + + +class ToolMessageTypedDict(TypedDict): + content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + + +class ToolMessage(BaseModel): + content: Nullable[ToolMessageContent] + + role: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) + serialized = handler(self) + m = {} + 
+ for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py new file mode 100644 index 00000000..fd1aa13d --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py new file mode 100644 index 00000000..cb6feb6e --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] + + +class UsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/usermessage.py 
b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py new file mode 100644 index 00000000..e237e900 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.gcp.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) + + +class UserMessageTypedDict(TypedDict): + content: Nullable[UserMessageContentTypedDict] + role: Literal["user"] + + +class UserMessage(BaseModel): + content: Nullable[UserMessageContent] + + role: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/validationerror.py b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py new file mode 100644 index 00000000..2d330e9a --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from typing import List, Union +from typing_extensions import TypeAliasType, TypedDict + + +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) + + +Loc = TypeAliasType("Loc", Union[str, int]) + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + + msg: str + + type: str diff --git a/packages/gcp/src/mistralai/gcp/client/py.typed b/packages/gcp/src/mistralai/gcp/client/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/gcp/src/mistralai/gcp/client/sdk.py b/packages/gcp/src/mistralai/gcp/client/sdk.py new file mode 100644 index 00000000..e6e83839 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/sdk.py @@ -0,0 +1,243 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients
from .sdkconfiguration import SDKConfiguration
from .utils.logger import Logger, get_default_logger
from .utils.retries import RetryConfig
import google.auth
import google.auth.credentials
import google.auth.transport.requests
import httpx
import importlib
from mistralai.gcp.client import models, utils
from mistralai.gcp.client._hooks import SDKHooks
from mistralai.gcp.client._hooks.registration import GCPVertexAIPathHook
from mistralai.gcp.client.types import OptionalNullable, UNSET
import sys
from typing import Callable, Dict, Optional, TYPE_CHECKING, cast
import weakref

if TYPE_CHECKING:
    from mistralai.gcp.client.chat import Chat
    from mistralai.gcp.client.fim import Fim


class MistralGCP(BaseSDK):
    r"""Mistral AI API: Dora OpenAPI schema

    Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.
    """

    chat: "Chat"
    r"""Chat Completion API."""
    fim: "Fim"
    r"""Fill-in-the-middle API."""
    # Maps lazily-loaded sub-SDK attribute names to (module path, class name);
    # resolved on first access by __getattr__ below.
    _sub_sdk_map = {
        "chat": ("mistralai.gcp.client.chat", "Chat"),
        "fim": ("mistralai.gcp.client.fim", "Fim"),
    }

    def __init__(
        self,
        project_id: Optional[str] = None,
        region: str = "europe-west4",
        access_token: Optional[str] = None,
        server: Optional[str] = None,
        server_url: Optional[str] = None,
        url_params: Optional[Dict[str, str]] = None,
        client: Optional[HttpClient] = None,
        async_client: Optional[AsyncHttpClient] = None,
        retry_config: OptionalNullable[RetryConfig] = UNSET,
        timeout_ms: Optional[int] = None,
        debug_logger: Optional[Logger] = None,
    ) -> None:
        r"""Instantiates the SDK configuring it with the provided parameters.

        :param project_id: GCP project ID (auto-detected from credentials if not provided)
        :param region: GCP region for Vertex AI (default: europe-west4)
        :param access_token: Fixed access token for testing (skips google.auth)
        :param server: The server by name to use for all methods
        :param server_url: The server URL to use for all methods
        :param url_params: Parameters to optionally template the server URL with
        :param client: The HTTP client to use for all synchronous methods
        :param async_client: The Async HTTP client to use for all asynchronous methods
        :param retry_config: The retry configuration to use for all supported methods
        :param timeout_ms: Optional request timeout applied to each operation in milliseconds
        :param debug_logger: Optional logger used for request/response debug output
        """
        credentials: Optional[google.auth.credentials.Credentials] = None
        if access_token is None:
            creds, detected_project_id = google.auth.default(
                scopes=["https://www.googleapis.com/auth/cloud-platform"],
            )
            if creds is None:
                raise ValueError("Failed to obtain GCP credentials")
            # Cast to Credentials base class which has refresh() and token
            creds = cast(google.auth.credentials.Credentials, creds)
            creds.refresh(google.auth.transport.requests.Request())
            credentials = creds
            project_id = project_id or detected_project_id

        if project_id is None:
            raise ValueError(
                "project_id must be provided or available from default credentials"
            )

        self._credentials = credentials
        self._project_id = project_id
        self._region = region
        self._fixed_access_token = access_token

        def get_auth_token() -> str:
            # Security callable invoked per request by the generated basesdk.
            if self._fixed_access_token:
                return self._fixed_access_token
            creds = self._credentials
            if creds is None:
                raise ValueError("No credentials available")
            # Only refresh when the token is expired or missing.
            # This avoids a blocking HTTP round-trip on every request and
            # minimises event-loop blocking when called from async paths
            # (the Speakeasy-generated basesdk always calls security
            # callables synchronously).
            if not creds.valid:
                creds.refresh(google.auth.transport.requests.Request())
            token = creds.token
            if token is None:
                raise ValueError("Failed to obtain access token")
            return token

        if server_url is None:
            server_url = f"https://{region}-aiplatform.googleapis.com"

        client_supplied = True
        if client is None:
            client = httpx.Client(follow_redirects=True)
            client_supplied = False

        assert issubclass(
            type(client), HttpClient
        ), "The provided client must implement the HttpClient protocol."

        async_client_supplied = True
        if async_client is None:
            async_client = httpx.AsyncClient(follow_redirects=True)
            async_client_supplied = False

        if debug_logger is None:
            debug_logger = get_default_logger()

        assert issubclass(
            type(async_client), AsyncHttpClient
        ), "The provided async_client must implement the AsyncHttpClient protocol."

        def get_security() -> models.Security:
            return models.Security(api_key=get_auth_token())

        security: Callable[[], models.Security] = get_security

        if url_params is not None:
            server_url = utils.template_url(server_url, url_params)

        BaseSDK.__init__(
            self,
            SDKConfiguration(
                client=client,
                client_supplied=client_supplied,
                async_client=async_client,
                async_client_supplied=async_client_supplied,
                security=security,
                server_url=server_url,
                server=server,
                retry_config=retry_config,
                timeout_ms=timeout_ms,
                debug_logger=debug_logger,
            ),
            parent_ref=self,
        )

        hooks = SDKHooks()
        self.sdk_configuration.__dict__["_hooks"] = hooks

        # Register hook that builds Vertex AI URL path
        hooks.register_before_request_hook(GCPVertexAIPathHook(project_id, region))

        current_server_url, *_ = self.sdk_configuration.get_server_details()
        server_url, self.sdk_configuration.client = hooks.sdk_init(
            current_server_url, client
        )
        if current_server_url != server_url:
            self.sdk_configuration.server_url = server_url

        # Close SDK-owned transports when this instance is garbage collected;
        # caller-supplied clients are left open (see the *_supplied flags).
        weakref.finalize(
            self,
            close_clients,
            cast(ClientOwner, self.sdk_configuration),
            self.sdk_configuration.client,
            self.sdk_configuration.client_supplied,
            self.sdk_configuration.async_client,
            self.sdk_configuration.async_client_supplied,
        )

    def dynamic_import(self, modname, retries=3):
        """Import ``modname``, retrying after evicting half-initialized modules.

        :param modname: Absolute module path to import.
        :param retries: Number of import attempts before giving up.
        :raises ImportError: If all attempts fail (original cause chained).
        """
        last_exc: Optional[Exception] = None
        for attempt in range(retries):
            try:
                return importlib.import_module(modname)
            # ModuleNotFoundError is a subclass of ImportError, so this pair
            # covers it as well.
            except (KeyError, ImportError) as e:
                last_exc = e
                # Clear any half-initialized module and retry
                sys.modules.pop(modname, None)
                if attempt == retries - 1:
                    break
        raise ImportError(
            f"Failed to import module '{modname}' after {retries} attempts"
        ) from last_exc

    def __getattr__(self, name: str):
        """Lazily resolve sub-SDKs (chat, fim) on first attribute access."""
        if name in self._sub_sdk_map:
            module_path, class_name = self._sub_sdk_map[name]
            try:
                module = self.dynamic_import(module_path)
                klass = getattr(module, class_name)
                instance = klass(self.sdk_configuration, parent_ref=self)
                # Cache the instance so subsequent accesses skip __getattr__.
                setattr(self, name, instance)
                return instance
            except ImportError as e:
                raise AttributeError(
                    f"Failed to import module {module_path} for attribute {name}: {e}"
                ) from e
            except AttributeError as e:
                raise AttributeError(
                    f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}"
                ) from e

        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{name}'"
        )

    def __dir__(self):
        # Advertise the lazily-loaded sub-SDKs alongside real attributes.
        default_attrs = list(super().__dir__())
        lazy_attrs = list(self._sub_sdk_map.keys())
        return sorted(list(set(default_attrs + lazy_attrs)))

    def __enter__(self):
        return self

    async def __aenter__(self):
        return self

    def __exit__(self, _exc_type, _exc_val, _exc_tb):
        # Only close the sync client if the SDK created it itself.
        if (
            self.sdk_configuration.client is not None
            and not self.sdk_configuration.client_supplied
        ):
            self.sdk_configuration.client.close()
        self.sdk_configuration.client = None

    async def __aexit__(self, _exc_type, _exc_val, _exc_tb):
        # Only close the async client if the SDK created it itself.
        if (
            self.sdk_configuration.async_client is not None
            and not self.sdk_configuration.async_client_supplied
        ):
            await self.sdk_configuration.async_client.aclose()
        self.sdk_configuration.async_client = None
DO NOT EDIT.""" + +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai.gcp.client import models +from mistralai.gcp.client.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_EU = "eu" +r"""EU Production server""" +SERVERS = { + SERVER_EU: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool + debug_logger: Logger + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_EU + + if self.server not in SERVERS: + raise ValueError(f'Invalid server "{self.server}"') + + return SERVERS[self.server], {} diff --git a/packages/gcp/src/mistralai/gcp/client/types/__init__.py b/packages/gcp/src/mistralai/gcp/client/types/__init__.py new file mode 100644 index 00000000..fc76fe0c --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

# --- mistralai/gcp/client/types/__init__.py ---

from .basemodel import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UnrecognizedInt,
    UnrecognizedStr,
    UNSET,
    UNSET_SENTINEL,
)

__all__ = [
    "BaseModel",
    "Nullable",
    "OptionalNullable",
    "UnrecognizedInt",
    "UnrecognizedStr",
    "UNSET",
    "UNSET_SENTINEL",
]

# --- mistralai/gcp/client/types/basemodel.py ---

from pydantic import ConfigDict, model_serializer
from pydantic import BaseModel as PydanticBaseModel
from pydantic_core import core_schema
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union
from typing_extensions import TypeAliasType, TypeAlias


class BaseModel(PydanticBaseModel):
    """Project-wide pydantic base: alias-friendly and permissive about types."""

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=()
    )


class Unset(BaseModel):
    """Falsy sentinel distinguishing "field omitted" from an explicit None."""

    @model_serializer(mode="plain")
    def serialize_model(self):
        # Serialize as a recognizable marker string rather than a model.
        return UNSET_SENTINEL

    def __bool__(self) -> Literal[False]:
        return False


UNSET = Unset()
UNSET_SENTINEL = "~?~unset~?~sentinel~?~"


T = TypeVar("T")
if TYPE_CHECKING:
    # Plain aliases keep static checkers happy without runtime machinery.
    Nullable: TypeAlias = Union[T, None]
    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
else:
    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
    OptionalNullable = TypeAliasType(
        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
    )


class UnrecognizedStr(str):
    """String accepted only in lax validation — a union "fallback" member."""

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source_type: Any, _handler: Any
    ) -> core_schema.CoreSchema:
        def _coerce(value: Any) -> "UnrecognizedStr":
            return value if isinstance(value, cls) else cls(str(value))

        # Strict mode always fails (none_schema), which makes Pydantic prefer
        # more specific union members (e.g. Literal values) and only fall back
        # to this type in lax mode.
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema(
                [
                    core_schema.str_schema(),
                    core_schema.no_info_plain_validator_function(_coerce),
                ]
            ),
            strict_schema=core_schema.none_schema(),
        )


class UnrecognizedInt(int):
    """Integer accepted only in lax validation — a union "fallback" member."""

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source_type: Any, _handler: Any
    ) -> core_schema.CoreSchema:
        def _coerce(value: Any) -> "UnrecognizedInt":
            return value if isinstance(value, cls) else cls(int(value))

        # Same strict-fails/lax-coerces arrangement as UnrecognizedStr.
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema(
                [
                    core_schema.int_schema(),
                    core_schema.no_info_plain_validator_function(_coerce),
                ]
            ),
            strict_schema=core_schema.none_schema(),
        )
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import Any, TYPE_CHECKING, Callable, TypeVar
import asyncio

from .dynamic_imports import lazy_getattr, lazy_dir

_T = TypeVar("_T")


async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T:
    """Run a synchronous function in a thread pool to avoid blocking the event loop."""
    return await asyncio.to_thread(func, *args)


# These imports exist only for static type checkers; at runtime each name is
# resolved lazily by the module-level __getattr__ below (PEP 562) using the
# _dynamic_imports table.
if TYPE_CHECKING:
    from .annotations import get_discriminator
    from .datetimes import parse_datetime
    from .enums import OpenEnumMeta
    from .unions import parse_open_union
    from .headers import get_headers, get_response_headers
    from .metadata import (
        FieldMetadata,
        find_metadata,
        FormMetadata,
        HeaderMetadata,
        MultipartFormMetadata,
        PathParamMetadata,
        QueryParamMetadata,
        RequestMetadata,
        SecurityMetadata,
    )
    from .queryparams import get_query_params
    from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig
    from .requestbodies import serialize_request_body, SerializedRequestBody
    from .security import get_security
    from .serializers import (
        get_pydantic_model,
        marshal_json,
        unmarshal,
        unmarshal_json,
        serialize_decimal,
        serialize_float,
        serialize_int,
        stream_to_text,
        stream_to_text_async,
        stream_to_bytes,
        stream_to_bytes_async,
        validate_const,
        validate_decimal,
        validate_float,
        validate_int,
    )
    from .url import generate_url, template_url, remove_suffix
    from .values import (
        get_global_from_env,
        match_content_type,
        match_status_codes,
        match_response,
        cast_partial,
    )
    from .logger import Logger, get_body_content, get_default_logger

__all__ = [
    "BackoffStrategy",
    "FieldMetadata",
    "find_metadata",
    "FormMetadata",
    "generate_url",
    "get_body_content",
    "get_default_logger",
    "get_discriminator",
    "parse_datetime",
    "get_global_from_env",
    "get_headers",
    "get_pydantic_model",
    "get_query_params",
    "get_response_headers",
    "get_security",
    "HeaderMetadata",
    "Logger",
    "marshal_json",
    "match_content_type",
    "match_status_codes",
    "match_response",
    "MultipartFormMetadata",
    "OpenEnumMeta",
    "parse_open_union",
    "PathParamMetadata",
    "QueryParamMetadata",
    "remove_suffix",
    "Retries",
    "retry",
    "retry_async",
    "RetryConfig",
    "RequestMetadata",
    "SecurityMetadata",
    "serialize_decimal",
    "serialize_float",
    "serialize_int",
    "serialize_request_body",
    "SerializedRequestBody",
    "stream_to_text",
    "stream_to_text_async",
    "stream_to_bytes",
    "stream_to_bytes_async",
    "template_url",
    "unmarshal",
    "unmarshal_json",
    "validate_decimal",
    "validate_const",
    "validate_float",
    "validate_int",
    "cast_partial",
]

# Maps each public name in __all__ to the relative submodule that defines it;
# consumed by __getattr__ so submodules are imported only on first use.
_dynamic_imports: dict[str, str] = {
    "BackoffStrategy": ".retries",
    "FieldMetadata": ".metadata",
    "find_metadata": ".metadata",
    "FormMetadata": ".metadata",
    "generate_url": ".url",
    "get_body_content": ".logger",
    "get_default_logger": ".logger",
    "get_discriminator": ".annotations",
    "parse_datetime": ".datetimes",
    "get_global_from_env": ".values",
    "get_headers": ".headers",
    "get_pydantic_model": ".serializers",
    "get_query_params": ".queryparams",
    "get_response_headers": ".headers",
    "get_security": ".security",
    "HeaderMetadata": ".metadata",
    "Logger": ".logger",
    "marshal_json": ".serializers",
    "match_content_type": ".values",
    "match_status_codes": ".values",
    "match_response": ".values",
    "MultipartFormMetadata": ".metadata",
    "OpenEnumMeta": ".enums",
    "parse_open_union": ".unions",
    "PathParamMetadata": ".metadata",
    "QueryParamMetadata": ".metadata",
    "remove_suffix": ".url",
    "Retries": ".retries",
    "retry": ".retries",
    "retry_async": ".retries",
    "RetryConfig": ".retries",
    "RequestMetadata": ".metadata",
    "SecurityMetadata": ".metadata",
    "serialize_decimal": ".serializers",
    "serialize_float": ".serializers",
    "serialize_int": ".serializers",
    "serialize_request_body": ".requestbodies",
    "SerializedRequestBody": ".requestbodies",
    "stream_to_text": ".serializers",
    "stream_to_text_async": ".serializers",
    "stream_to_bytes": ".serializers",
    "stream_to_bytes_async": ".serializers",
    "template_url": ".url",
    "unmarshal": ".serializers",
    "unmarshal_json": ".serializers",
    "validate_decimal": ".serializers",
    "validate_const": ".serializers",
    "validate_float": ".serializers",
    "validate_int": ".serializers",
    "cast_partial": ".values",
}


def __getattr__(attr_name: str) -> Any:
    # PEP 562 module __getattr__: import the defining submodule on demand.
    return lazy_getattr(
        attr_name, package=__package__, dynamic_imports=_dynamic_imports
    )


def __dir__():
    # Keep dir()/autocomplete consistent with the lazy exports.
    return lazy_dir(dynamic_imports=_dynamic_imports)
+ """ + upper_fieldname = fieldname.upper() + + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" + + if isinstance(field, dict): + if key in field: + return f"{field[key]}" + + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + return None + + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None + + if isinstance(model, list): + for field in model: + discriminator = search_nested_discriminator(field) + if discriminator is not None: + return discriminator + + discriminator = search_nested_discriminator(model) + if discriminator is not None: + return discriminator + + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py b/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py b/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py new file mode 100644 index 00000000..673edf82 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import enum
import sys
from typing import Any

from pydantic_core import core_schema


class OpenEnumMeta(enum.EnumMeta):
    """Metaclass for "open" enums: looking up an unknown value returns the raw
    value instead of raising ValueError, so new server-side enum members do
    not break deserialization."""

    # The __call__ method `boundary` kwarg was added in 3.11 and must be present
    # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622
    # pylint: disable=unexpected-keyword-arg
    # The __call__ method `values` varg must be named for pyright.
    # pylint: disable=keyword-arg-before-vararg

    if sys.version_info >= (3, 11):
        def __call__(
            cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            # Functional-API creation (names supplied) keeps stock behavior.
            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )

            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )
            except ValueError:
                # Unknown member: pass the raw value through (open enum).
                return value
    else:
        def __call__(
            cls, value, names=None, *, module=None, qualname=None, type=None, start=1
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            # Functional-API creation (names supplied) keeps stock behavior.
            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )

            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )
            except ValueError:
                # Unknown member: pass the raw value through (open enum).
                return value

    def __new__(mcs, name, bases, namespace, **kwargs):
        cls = super().__new__(mcs, name, bases, namespace, **kwargs)

        # Add __get_pydantic_core_schema__ to make open enums work correctly
        # in union discrimination. In strict mode (used by Pydantic for unions),
        # only known enum values match. In lax mode, unknown values are accepted.
        def __get_pydantic_core_schema__(
            cls_inner: Any, _source_type: Any, _handler: Any
        ) -> core_schema.CoreSchema:
            # Create a validator that only accepts known enum values (for strict mode)
            def validate_strict(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                # Use the parent EnumMeta's __call__ which raises ValueError for unknown values
                return enum.EnumMeta.__call__(cls_inner, v)

            # Create a lax validator that accepts unknown values
            def validate_lax(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                try:
                    return enum.EnumMeta.__call__(cls_inner, v)
                except ValueError:
                    # Return the raw value for unknown enum values
                    return v

            # Determine the base type schema (str or int) by walking the MRO:
            # whichever of int/str appears first decides the wire type.
            is_int_enum = False
            for base in cls_inner.__mro__:
                if base is int:
                    is_int_enum = True
                    break
                if base is str:
                    break

            base_schema = (
                core_schema.int_schema()
                if is_int_enum
                else core_schema.str_schema()
            )

            # Use lax_or_strict_schema:
            # - strict mode: only known enum values match (raises ValueError for unknown)
            # - lax mode: accept any value, return enum member or raw value
            return core_schema.lax_or_strict_schema(
                lax_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_lax)]
                ),
                strict_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_strict)]
                ),
            )

        setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__))
        return cls
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import re
import json
from dataclasses import dataclass, asdict
from typing import (
    Any,
    Callable,
    Generic,
    TypeVar,
    Optional,
    Generator,
    AsyncGenerator,
    Tuple,
)
import httpx

T = TypeVar("T")


class EventStream(Generic[T]):
    """Synchronous iterator over a server-sent-events (SSE) HTTP response,
    decoding each event with the supplied decoder callable."""

    # Holds a reference to the SDK client to avoid it being garbage collected
    # and cause termination of the underlying httpx client.
    client_ref: Optional[object]
    response: httpx.Response
    generator: Generator[T, None, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self.response = response
        self.generator = stream_events(response, decoder, sentinel)
        self.client_ref = client_ref
        self._closed = False

    def __iter__(self):
        return self

    def __next__(self):
        # After close(), stop iteration instead of reading a closed response.
        if self._closed:
            raise StopIteration
        return next(self.generator)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._closed = True
        self.response.close()


class EventStreamAsync(Generic[T]):
    """Asynchronous counterpart of EventStream (async for / async with)."""

    # Holds a reference to the SDK client to avoid it being garbage collected
    # and cause termination of the underlying httpx client.
    client_ref: Optional[object]
    response: httpx.Response
    generator: AsyncGenerator[T, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self.response = response
        self.generator = stream_events_async(response, decoder, sentinel)
        self.client_ref = client_ref
        self._closed = False

    def __aiter__(self):
        return self

    async def __anext__(self):
        # After aclose(), stop iteration instead of reading a closed response.
        if self._closed:
            raise StopAsyncIteration
        return await self.generator.__anext__()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        self._closed = True
        await self.response.aclose()


@dataclass
class ServerEvent:
    # Parsed SSE message fields (id, event, data, retry) per the event-stream
    # wire format; data holds decoded JSON when the payload parses as JSON.
    id: Optional[str] = None
    event: Optional[str] = None
    data: Any = None
    retry: Optional[int] = None


# Recognized end-of-message byte sequences, longest first so the scan below
# (which stops at the first match) prefers the longest boundary.
MESSAGE_BOUNDARIES = [
    b"\r\n\r\n",
    b"\r\n\r",
    b"\r\n\n",
    b"\r\r\n",
    b"\n\r\n",
    b"\r\r",
    b"\n\r",
    b"\n\n",
]

UTF8_BOM = b"\xef\xbb\xbf"


async def stream_events_async(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> AsyncGenerator[T, None]:
    """Incrementally parse an SSE byte stream, yielding one decoded event per
    message block; stops early when the sentinel payload is seen."""
    buffer = bytearray()
    position = 0
    event_id: Optional[str] = None
    async for chunk in response.aiter_bytes():
        # Strip a UTF-8 BOM from the very first bytes of the stream only.
        if len(buffer) == 0 and chunk.startswith(UTF8_BOM):
            chunk = chunk[len(UTF8_BOM) :]
        buffer += chunk
        # Scan forward for a message boundary; `position` marks the start of
        # the current (unfinished) message block.
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            block = buffer[position:i]
            position = i + len(seq)
            event, discard, event_id = _parse_event(
                raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id
            )
            if event is not None:
                yield event
            if discard:
                # Sentinel reached: close the response and end the stream.
                await response.aclose()
                return

        # Drop fully-consumed bytes so the buffer stays small.
        if position > 0:
            buffer = buffer[position:]
            position = 0

    # Flush any trailing block that ended without a boundary.
    event, discard, _ = _parse_event(
        raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id
    )
    if event is not None:
        yield event
position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + event_id: Optional[str] = None + for chunk in response.iter_bytes(): + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + if discard: + response.close() + return + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def _parse_event( + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim == 0: + continue + + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == 
"id": + publish = True + if "\x00" not in value: + event_id = value + elif field == "retry": + if value.isdigit(): + event.retry = int(value) + publish = True + + event.id = event_id + + if sentinel and data == f"{sentinel}\n": + return None, True, event_id + + if data: + data = data[:-1] + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data + + out = None + if publish: + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/packages/gcp/src/mistralai/gcp/client/utils/forms.py b/packages/gcp/src/mistralai/gcp/client/utils/forms.py new file mode 100644 index 00000000..1e550bd5 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/forms.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Tuple,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .serializers import marshal_json

from .metadata import (
    FormMetadata,
    MultipartFormMetadata,
    find_field_metadata,
)
from .values import _is_set, _val_to_string


def _populate_form(
    field_name: str,
    explode: bool,
    obj: Any,
    delimiter: str,
    form: Dict[str, List[str]],
):
    """Serialize `obj` into `form` under `field_name`, exploding or joining
    values with `delimiter` per the field's style; returns `form`."""
    if not _is_set(obj):
        return form

    if isinstance(obj, BaseModel):
        items = []

        obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for name in obj_fields:
            obj_field = obj_fields[name]
            # Prefer the wire alias over the Python attribute name.
            obj_field_name = obj_field.alias if obj_field.alias is not None else name
            if obj_field_name == "":
                continue

            val = getattr(obj, name)
            if not _is_set(val):
                continue

            if explode:
                form[obj_field_name] = [_val_to_string(val)]
            else:
                items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}")

        if len(items) > 0:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, Dict):
        items = []
        for key, value in obj.items():
            if not _is_set(value):
                continue

            if explode:
                form[key] = [_val_to_string(value)]
            else:
                items.append(f"{key}{delimiter}{_val_to_string(value)}")

        if len(items) > 0:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, List):
        items = []

        for value in obj:
            if not _is_set(value):
                continue

            if explode:
                # Idiomatic membership test ("not in") instead of "not x in y".
                if field_name not in form:
                    form[field_name] = []
                form[field_name].append(_val_to_string(value))
            else:
                items.append(_val_to_string(value))

        if len(items) > 0:
            form[field_name] = [delimiter.join([str(item) for item in items])]
    else:
        form[field_name] = [_val_to_string(obj)]

    return form


def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]:
    """Extract file name, content, and content type from a file object."""
    file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields

    file_name = ""
    content = None
    content_type = None

    for file_field_name in file_fields:
        file_field = file_fields[file_field_name]

        file_metadata = find_field_metadata(file_field, MultipartFormMetadata)
        if file_metadata is None:
            continue

        if file_metadata.content:
            content = getattr(file_obj, file_field_name, None)
        elif file_field_name == "content_type":
            content_type = getattr(file_obj, file_field_name, None)
        else:
            file_name = getattr(file_obj, file_field_name)

    # A multipart file part is only valid with both a name and content.
    if file_name == "" or content is None:
        raise ValueError("invalid multipart/form-data file")

    return file_name, content, content_type


def serialize_multipart_form(
    media_type: str, request: Any
) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]:
    """Split a pydantic request model into multipart form fields and file parts.

    Returns ``(media_type, form_fields, files)`` suitable for an httpx-style
    multipart request.

    Raises:
        TypeError: If `request` is not a pydantic BaseModel.
        ValueError: If a file field is missing its name or content.
    """
    form: Dict[str, Any] = {}
    files: List[Tuple[str, Any]] = []

    if not isinstance(request, BaseModel):
        raise TypeError("invalid request body type")

    request_fields: Dict[str, FieldInfo] = request.__class__.model_fields
    request_field_types = get_type_hints(request.__class__)

    for name in request_fields:
        field = request_fields[name]

        val = getattr(request, name)
        if not _is_set(val):
            continue

        field_metadata = find_field_metadata(field, MultipartFormMetadata)
        if not field_metadata:
            continue

        f_name = field.alias if field.alias else name

        if field_metadata.file:
            if isinstance(val, List):
                # Handle array of files
                array_field_name = f_name
                for file_obj in val:
                    if not _is_set(file_obj):
                        continue

                    file_name, content, content_type = _extract_file_properties(
                        file_obj
                    )

                    if content_type is not None:
                        files.append(
                            (array_field_name, (file_name, content, content_type))
                        )
                    else:
                        files.append((array_field_name, (file_name, content)))
            else:
                # Handle single file
                file_name, content, content_type = _extract_file_properties(val)

                if content_type is not None:
                    files.append((f_name, (file_name, content, content_type)))
                else:
                    files.append((f_name, (file_name, content)))
        elif field_metadata.json:
            # JSON-typed fields ride along as an unnamed application/json part.
            files.append(
                (
                    f_name,
                    (
                        None,
                        marshal_json(val, request_field_types[name]),
                        "application/json",
                    ),
                )
            )
        else:
            if isinstance(val, List):
                values = []

                for value in val:
                    if not _is_set(value):
                        continue
                    values.append(_val_to_string(value))

                array_field_name = f_name
                form[array_field_name] = values
            else:
                form[f_name] = _val_to_string(val)
    return media_type, form, files


def serialize_form_data(data: Any) -> Dict[str, Any]:
    """Serialize a pydantic model or plain dict into urlencoded form values.

    Raises:
        TypeError: If `data` is neither a BaseModel nor a dict.
        ValueError: If a field declares an unsupported form style.
    """
    form: Dict[str, List[str]] = {}

    if isinstance(data, BaseModel):
        data_fields: Dict[str, FieldInfo] = data.__class__.model_fields
        data_field_types = get_type_hints(data.__class__)
        for name in data_fields:
            field = data_fields[name]

            val = getattr(data, name)
            if not _is_set(val):
                continue

            metadata = find_field_metadata(field, FormMetadata)
            if metadata is None:
                continue

            f_name = field.alias if field.alias is not None else name

            if metadata.json:
                form[f_name] = [marshal_json(val, data_field_types[name])]
            else:
                if metadata.style == "form":
                    _populate_form(
                        f_name,
                        metadata.explode,
                        val,
                        ",",
                        form,
                    )
                else:
                    raise ValueError(f"Invalid form style for field {name}")
    elif isinstance(data, Dict):
        for key, value in data.items():
            if _is_set(value):
                form[key] = [_val_to_string(value)]
    else:
        raise TypeError(f"Invalid request body type {type(data)} for form data")

    return form
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _is_set, _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if _is_set(headers_params): + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if _is_set(gbls): + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if not _is_set(obj): + return "" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = 
obj_field.alias if obj_field.alias is not None else name + + val = getattr(obj, name) + if not _is_set(val): + continue + + if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if not _is_set(value): + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if not _is_set(value): + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + elif _is_set(obj): + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/packages/gcp/src/mistralai/gcp/client/utils/logger.py b/packages/gcp/src/mistralai/gcp/client/utils/logger.py new file mode 100644 index 00000000..b661aff6 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/logger.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Any, Protocol + + +class Logger(Protocol): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + + +class NoOpLogger: + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + + +def get_body_content(req: httpx.Request) -> str: + return "" if not hasattr(req, "_content") else str(req.content) + + +def get_default_logger() -> Logger: + return NoOpLogger() diff --git a/packages/gcp/src/mistralai/gcp/client/utils/metadata.py b/packages/gcp/src/mistralai/gcp/client/utils/metadata.py new file mode 100644 index 00000000..173b3e5c --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/metadata.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Optional, Type, TypeVar, Union +from dataclasses import dataclass +from pydantic.fields import FieldInfo + + +T = TypeVar("T") + + +@dataclass +class SecurityMetadata: + option: bool = False + scheme: bool = False + scheme_type: Optional[str] = None + sub_type: Optional[str] = None + field_name: Optional[str] = None + + def get_field_name(self, default: str) -> str: + return self.field_name or default + + +@dataclass +class ParamMetadata: + serialization: Optional[str] = None + style: str = "simple" + explode: bool = False + + +@dataclass +class PathParamMetadata(ParamMetadata): + pass + + +@dataclass +class QueryParamMetadata(ParamMetadata): + style: str = "form" + explode: bool = True + + +@dataclass +class HeaderMetadata(ParamMetadata): + pass + + +@dataclass +class RequestMetadata: + media_type: str = "application/octet-stream" + + +@dataclass +class MultipartFormMetadata: + file: bool = False + content: bool = False + json: bool = False + + +@dataclass +class FormMetadata: + json: bool = False + style: str = "form" + explode: bool = True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: 
Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py b/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py new file mode 100644 index 00000000..c04e0db8 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py @@ -0,0 +1,217 
@@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) + if _is_set(gbls): + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if _is_set(query_params) else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == 
[] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj): + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj) or not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if not _is_set(obj_param_metadata): + continue + + obj_val = getattr(obj, name) + if not _is_set(obj_val): + continue + + if isinstance(obj_val, BaseModel): + 
_populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for key, val in value.items(): + if not _is_set(val): + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for val in value: + if not _is_set(val): + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py b/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py new file mode 100644 index 00000000..1de32b6d --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: Optional[str] = None + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"^multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/packages/gcp/src/mistralai/gcp/client/utils/retries.py 
b/packages/gcp/src/mistralai/gcp/client/utils/retries.py new file mode 100644 index 00000000..af07d4e9 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/retries.py @@ -0,0 +1,271 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import asyncio +import random +import time +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + retry_after: Optional[int] + + def __init__(self, response: httpx.Response): + self.response = response + self.retry_after = _parse_retry_after_header(response) + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. 
+ """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. + """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise 
PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + time.sleep(sleep) + retries += 1 + + +async def 
retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/packages/gcp/src/mistralai/gcp/client/utils/security.py b/packages/gcp/src/mistralai/gcp/client/utils/security.py new file mode 100644 index 00000000..17996bd5 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/security.py @@ -0,0 +1,176 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import base64 +from typing import ( + Any, + Dict, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: 
SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value + elif sub_type == "custom": + return + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) 
-> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/gcp/src/mistralai/gcp/client/utils/serializers.py b/packages/gcp/src/mistralai/gcp/client/utils/serializers.py new file mode 100644 index 00000000..14321eb4 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/serializers.py @@ -0,0 +1,229 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if 
not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unions.py b/packages/gcp/src/mistralai/gcp/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..ead3e5a0 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.gcp.client import errors + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... 
+ + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise errors.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/packages/gcp/src/mistralai/gcp/client/utils/url.py b/packages/gcp/src/mistralai/gcp/client/utils/url.py new file mode 100644 index 00000000..c78ccbae --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/url.py @@ -0,0 +1,155 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if _is_set(gbls): + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = 
get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if _is_set(path_params) else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if not _is_set(param): + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if not _is_set(pp_val): + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if not _is_set(param[pp_key]): + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if not _is_set(param_field_val): + continue + if param_metadata.explode: + pp_vals.append( + 
f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + elif _is_set(param): + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/packages/gcp/src/mistralai/gcp/client/utils/values.py b/packages/gcp/src/mistralai/gcp/client/utils/values.py new file mode 100644 index 00000000..dae01a44 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/values.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +from functools import partial +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from ..types.basemodel import Unset + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + +def cast_partial(typ): + return partial(cast, typ) + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: 
+ if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params + + +def _is_set(value: Any) -> bool: + return value is not None and not isinstance(value, Unset) diff --git a/packages/gcp/uv.lock b/packages/gcp/uv.lock new file mode 100644 index 00000000..0c978164 --- /dev/null +++ b/packages/gcp/uv.lock @@ -0,0 +1,774 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = 
"sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = 
"sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" }, + { url = "https://files.pythonhosted.org/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, 
upload-time = "2025-10-14T04:40:14.728Z" }, + { url = "https://files.pythonhosted.org/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" }, + { url = "https://files.pythonhosted.org/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" }, + { url = "https://files.pythonhosted.org/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, upload-time = "2025-10-14T04:40:19.08Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = "2025-10-14T04:40:20.607Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = 
"2025-10-14T04:40:21.719Z" }, + { url = "https://files.pythonhosted.org/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" }, + { url = "https://files.pythonhosted.org/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" }, + { url = "https://files.pythonhosted.org/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" }, + { url = "https://files.pythonhosted.org/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" }, + { url = "https://files.pythonhosted.org/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = "2025-10-14T04:40:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = "https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" }, + { url = 
"https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, + { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = 
"2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = "https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = "https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = "https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = "https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = "https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, 
upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" 
}, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = 
"https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "eval-type-backport" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = "2025-12-02T11:51:41.665Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "google-auth" +version = "2.45.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = 
"sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = 
"isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mistralai-gcp" +version = "2.0.0rc1" +source = { editable = "." 
} +dependencies = [ + { name = "eval-type-backport" }, + { name = "google-auth" }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "typing-inspection" }, +] + +[package.dev-dependencies] +dev = [ + { name = "mypy" }, + { name = "pylint" }, + { name = "pyright" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "types-python-dateutil" }, +] + +[package.metadata] +requires-dist = [ + { name = "eval-type-backport", specifier = ">=0.2.0" }, + { name = "google-auth", specifier = ">=2.31.0,<3.0.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "pydantic", specifier = ">=2.11.2" }, + { name = "python-dateutil", specifier = ">=2.8.2" }, + { name = "requests", specifier = ">=2.32.3,<3.0.0" }, + { name = "typing-inspection", specifier = ">=0.4.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, + { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ 
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] 
+ +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = 
"typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url 
= "https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, 
upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = 
"sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = 
"2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.408" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = "2026-01-08T08:07:38.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" }, +] + +[[package]] +name = "pytest" +version = 
"8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + 
+[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = 
"sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { 
url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = 
"https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20251115" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = 
"sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, +] diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 8717942a..00000000 --- a/poetry.lock +++ /dev/null @@ 
-1,899 +0,0 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. - -[[package]] -name = "aiohttp" -version = "3.9.1" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1f80197f8b0b846a8d5cf7b7ec6084493950d0882cc5537fb7b96a69e3c8590"}, - {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72444d17777865734aa1a4d167794c34b63e5883abb90356a0364a28904e6c0"}, - {file = "aiohttp-3.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b05d5cbe9dafcdc733262c3a99ccf63d2f7ce02543620d2bd8db4d4f7a22f83"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c4fa235d534b3547184831c624c0b7c1e262cd1de847d95085ec94c16fddcd5"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:289ba9ae8e88d0ba16062ecf02dd730b34186ea3b1e7489046fc338bdc3361c4"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bff7e2811814fa2271be95ab6e84c9436d027a0e59665de60edf44e529a42c1f"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81b77f868814346662c96ab36b875d7814ebf82340d3284a31681085c051320f"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b9c7426923bb7bd66d409da46c41e3fb40f5caf679da624439b9eba92043fa6"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8d44e7bf06b0c0a70a20f9100af9fcfd7f6d9d3913e37754c12d424179b4e48f"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22698f01ff5653fe66d16ffb7658f582a0ac084d7da1323e39fd9eab326a1f26"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:ca7ca5abfbfe8d39e653870fbe8d7710be7a857f8a8386fc9de1aae2e02ce7e4"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:8d7f98fde213f74561be1d6d3fa353656197f75d4edfbb3d94c9eb9b0fc47f5d"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5216b6082c624b55cfe79af5d538e499cd5f5b976820eac31951fb4325974501"}, - {file = "aiohttp-3.9.1-cp310-cp310-win32.whl", hash = "sha256:0e7ba7ff228c0d9a2cd66194e90f2bca6e0abca810b786901a569c0de082f489"}, - {file = "aiohttp-3.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:c7e939f1ae428a86e4abbb9a7c4732bf4706048818dfd979e5e2839ce0159f23"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:df9cf74b9bc03d586fc53ba470828d7b77ce51b0582d1d0b5b2fb673c0baa32d"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecca113f19d5e74048c001934045a2b9368d77b0b17691d905af18bd1c21275e"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8cef8710fb849d97c533f259103f09bac167a008d7131d7b2b0e3a33269185c0"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea94403a21eb94c93386d559bce297381609153e418a3ffc7d6bf772f59cc35"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91c742ca59045dce7ba76cab6e223e41d2c70d79e82c284a96411f8645e2afff"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c93b7c2e52061f0925c3382d5cb8980e40f91c989563d3d32ca280069fd6a87"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee2527134f95e106cc1653e9ac78846f3a2ec1004cf20ef4e02038035a74544d"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11ff168d752cb41e8492817e10fb4f85828f6a0142b9726a30c27c35a1835f01"}, - {file = 
"aiohttp-3.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b8c3a67eb87394386847d188996920f33b01b32155f0a94f36ca0e0c635bf3e3"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c7b5d5d64e2a14e35a9240b33b89389e0035e6de8dbb7ffa50d10d8b65c57449"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:69985d50a2b6f709412d944ffb2e97d0be154ea90600b7a921f95a87d6f108a2"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:c9110c06eaaac7e1f5562caf481f18ccf8f6fdf4c3323feab28a93d34cc646bd"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737e69d193dac7296365a6dcb73bbbf53bb760ab25a3727716bbd42022e8d7a"}, - {file = "aiohttp-3.9.1-cp311-cp311-win32.whl", hash = "sha256:4ee8caa925aebc1e64e98432d78ea8de67b2272252b0a931d2ac3bd876ad5544"}, - {file = "aiohttp-3.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:a34086c5cc285be878622e0a6ab897a986a6e8bf5b67ecb377015f06ed316587"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f800164276eec54e0af5c99feb9494c295118fc10a11b997bbb1348ba1a52065"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:500f1c59906cd142d452074f3811614be04819a38ae2b3239a48b82649c08821"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0b0a6a36ed7e164c6df1e18ee47afbd1990ce47cb428739d6c99aaabfaf1b3af"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69da0f3ed3496808e8cbc5123a866c41c12c15baaaead96d256477edf168eb57"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176df045597e674fa950bf5ae536be85699e04cea68fa3a616cf75e413737eb5"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b796b44111f0cab6bbf66214186e44734b5baab949cb5fb56154142a92989aeb"}, - {file = 
"aiohttp-3.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f27fdaadce22f2ef950fc10dcdf8048407c3b42b73779e48a4e76b3c35bca26c"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcb6532b9814ea7c5a6a3299747c49de30e84472fa72821b07f5a9818bce0f66"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:54631fb69a6e44b2ba522f7c22a6fb2667a02fd97d636048478db2fd8c4e98fe"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4b4c452d0190c5a820d3f5c0f3cd8a28ace48c54053e24da9d6041bf81113183"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:cae4c0c2ca800c793cae07ef3d40794625471040a87e1ba392039639ad61ab5b"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:565760d6812b8d78d416c3c7cfdf5362fbe0d0d25b82fed75d0d29e18d7fc30f"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54311eb54f3a0c45efb9ed0d0a8f43d1bc6060d773f6973efd90037a51cd0a3f"}, - {file = "aiohttp-3.9.1-cp312-cp312-win32.whl", hash = "sha256:85c3e3c9cb1d480e0b9a64c658cd66b3cfb8e721636ab8b0e746e2d79a7a9eed"}, - {file = "aiohttp-3.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:11cb254e397a82efb1805d12561e80124928e04e9c4483587ce7390b3866d213"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8a22a34bc594d9d24621091d1b91511001a7eea91d6652ea495ce06e27381f70"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:598db66eaf2e04aa0c8900a63b0101fdc5e6b8a7ddd805c56d86efb54eb66672"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c9376e2b09895c8ca8b95362283365eb5c03bdc8428ade80a864160605715f1"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41473de252e1797c2d2293804e389a6d6986ef37cbb4a25208de537ae32141dd"}, - {file = 
"aiohttp-3.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c5857612c9813796960c00767645cb5da815af16dafb32d70c72a8390bbf690"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffcd828e37dc219a72c9012ec44ad2e7e3066bec6ff3aaa19e7d435dbf4032ca"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:219a16763dc0294842188ac8a12262b5671817042b35d45e44fd0a697d8c8361"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f694dc8a6a3112059258a725a4ebe9acac5fe62f11c77ac4dcf896edfa78ca28"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bcc0ea8d5b74a41b621ad4a13d96c36079c81628ccc0b30cfb1603e3dfa3a014"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:90ec72d231169b4b8d6085be13023ece8fa9b1bb495e4398d847e25218e0f431"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:cf2a0ac0615842b849f40c4d7f304986a242f1e68286dbf3bd7a835e4f83acfd"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0e49b08eafa4f5707ecfb321ab9592717a319e37938e301d462f79b4e860c32a"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2c59e0076ea31c08553e868cec02d22191c086f00b44610f8ab7363a11a5d9d8"}, - {file = "aiohttp-3.9.1-cp38-cp38-win32.whl", hash = "sha256:4831df72b053b1eed31eb00a2e1aff6896fb4485301d4ccb208cac264b648db4"}, - {file = "aiohttp-3.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:3135713c5562731ee18f58d3ad1bf41e1d8883eb68b363f2ffde5b2ea4b84cc7"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cfeadf42840c1e870dc2042a232a8748e75a36b52d78968cda6736de55582766"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:70907533db712f7aa791effb38efa96f044ce3d4e850e2d7691abd759f4f0ae0"}, - {file = 
"aiohttp-3.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cdefe289681507187e375a5064c7599f52c40343a8701761c802c1853a504558"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7481f581251bb5558ba9f635db70908819caa221fc79ee52a7f58392778c636"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49f0c1b3c2842556e5de35f122fc0f0b721334ceb6e78c3719693364d4af8499"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d406b01a9f5a7e232d1b0d161b40c05275ffbcbd772dc18c1d5a570961a1ca4"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d8e4450e7fe24d86e86b23cc209e0023177b6d59502e33807b732d2deb6975f"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0266cd6f005e99f3f51e583012de2778e65af6b73860038b968a0a8888487a"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab221850108a4a063c5b8a70f00dd7a1975e5a1713f87f4ab26a46e5feac5a0e"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c88a15f272a0ad3d7773cf3a37cc7b7d077cbfc8e331675cf1346e849d97a4e5"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:237533179d9747080bcaad4d02083ce295c0d2eab3e9e8ce103411a4312991a0"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:02ab6006ec3c3463b528374c4cdce86434e7b89ad355e7bf29e2f16b46c7dd6f"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04fa38875e53eb7e354ece1607b1d2fdee2d175ea4e4d745f6ec9f751fe20c7c"}, - {file = "aiohttp-3.9.1-cp39-cp39-win32.whl", hash = "sha256:82eefaf1a996060602f3cc1112d93ba8b201dbf5d8fd9611227de2003dddb3b7"}, - {file = "aiohttp-3.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:9b05d33ff8e6b269e30a7957bd3244ffbce2a7a35a81b81c382629b80af1a8bf"}, - {file = 
"aiohttp-3.9.1.tar.gz", hash = "sha256:8fc49a87ac269d4529da45871e2ffb6874e87779c3d0e2ccd813c0899221239d"}, -] - -[package.dependencies] -aiosignal = ">=1.1.2" -async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} -attrs = ">=17.3.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns", "brotlicffi"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" - -[[package]] -name = "annotated-types" -version = "0.6.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - -[[package]] -name = "async-timeout" -version = "4.0.3" -description = "Timeout context manager for asyncio programs" -optional = false -python-versions = ">=3.7" -files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, -] - -[[package]] -name = "attrs" -version = "23.1.0" -description = "Classes Without Boilerplate" -optional = false 
-python-versions = ">=3.7" -files = [ - {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, - {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] - -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - -[[package]] -name = "certifi" -version = "2023.11.17" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, -] - -[[package]] -name = "frozenlist" -version = "1.4.0" -description = "A list-like structure which implements collections.abc.MutableSequence" -optional = false -python-versions = ">=3.8" -files = [ - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, - {file = 
"frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, - {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, - {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, - {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = 
"sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, - {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = 
"sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, - {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, - {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, - {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, - {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, - {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, -] - -[[package]] -name = "idna" -version = "3.6" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, -] - -[[package]] -name = "multidict" -version = "6.0.4" -description = "multidict implementation" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, - {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, - {file = 
"multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, - {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, - {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, - {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, - {file = 
"multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, - {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, - {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, - {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, - {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, - {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, - {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, - {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, -] - -[[package]] -name = "mypy" -version = "1.7.1" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "mypy-1.7.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:12cce78e329838d70a204293e7b29af9faa3ab14899aec397798a4b41be7f340"}, - {file = "mypy-1.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1484b8fa2c10adf4474f016e09d7a159602f3239075c7bf9f1627f5acf40ad49"}, - {file = "mypy-1.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:31902408f4bf54108bbfb2e35369877c01c95adc6192958684473658c322c8a5"}, - {file = "mypy-1.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f2c2521a8e4d6d769e3234350ba7b65ff5d527137cdcde13ff4d99114b0c8e7d"}, - {file = "mypy-1.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:fcd2572dd4519e8a6642b733cd3a8cfc1ef94bafd0c1ceed9c94fe736cb65b6a"}, - {file = "mypy-1.7.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4b901927f16224d0d143b925ce9a4e6b3a758010673eeded9b748f250cf4e8f7"}, - {file = "mypy-1.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2f7f6985d05a4e3ce8255396df363046c28bea790e40617654e91ed580ca7c51"}, - {file = "mypy-1.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:944bdc21ebd620eafefc090cdf83158393ec2b1391578359776c00de00e8907a"}, - {file = "mypy-1.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9c7ac372232c928fff0645d85f273a726970c014749b924ce5710d7d89763a28"}, - {file = "mypy-1.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:f6efc9bd72258f89a3816e3a98c09d36f079c223aa345c659622f056b760ab42"}, - {file = "mypy-1.7.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6dbdec441c60699288adf051f51a5d512b0d818526d1dcfff5a41f8cd8b4aaf1"}, - {file = "mypy-1.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4fc3d14ee80cd22367caaaf6e014494415bf440980a3045bf5045b525680ac33"}, - {file = "mypy-1.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c6e4464ed5f01dc44dc9821caf67b60a4e5c3b04278286a85c067010653a0eb"}, - {file = "mypy-1.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:d9b338c19fa2412f76e17525c1b4f2c687a55b156320acb588df79f2e6fa9fea"}, - {file = "mypy-1.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:204e0d6de5fd2317394a4eff62065614c4892d5a4d1a7ee55b765d7a3d9e3f82"}, - {file = "mypy-1.7.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:84860e06ba363d9c0eeabd45ac0fde4b903ad7aa4f93cd8b648385a888e23200"}, - {file = 
"mypy-1.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8c5091ebd294f7628eb25ea554852a52058ac81472c921150e3a61cdd68f75a7"}, - {file = "mypy-1.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40716d1f821b89838589e5b3106ebbc23636ffdef5abc31f7cd0266db936067e"}, - {file = "mypy-1.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cf3f0c5ac72139797953bd50bc6c95ac13075e62dbfcc923571180bebb662e9"}, - {file = "mypy-1.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:78e25b2fd6cbb55ddfb8058417df193f0129cad5f4ee75d1502248e588d9e0d7"}, - {file = "mypy-1.7.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:75c4d2a6effd015786c87774e04331b6da863fc3fc4e8adfc3b40aa55ab516fe"}, - {file = "mypy-1.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2643d145af5292ee956aa0a83c2ce1038a3bdb26e033dadeb2f7066fb0c9abce"}, - {file = "mypy-1.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75aa828610b67462ffe3057d4d8a4112105ed211596b750b53cbfe182f44777a"}, - {file = "mypy-1.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ee5d62d28b854eb61889cde4e1dbc10fbaa5560cb39780c3995f6737f7e82120"}, - {file = "mypy-1.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:72cf32ce7dd3562373f78bd751f73c96cfb441de147cc2448a92c1a308bd0ca6"}, - {file = "mypy-1.7.1-py3-none-any.whl", hash = "sha256:f7c5d642db47376a0cc130f0de6d055056e010debdaf0707cd2b0fc7e7ef30ea"}, - {file = "mypy-1.7.1.tar.gz", hash = "sha256:fcb6d9afb1b6208b4c712af0dafdc650f518836065df0d4fb1d800f5d6773db2"}, -] - -[package.dependencies] -mypy-extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." 
-optional = false -python-versions = ">=3.5" -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "orjson" -version = "3.9.10" -description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -optional = false -python-versions = ">=3.8" -files = [ - {file = "orjson-3.9.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c18a4da2f50050a03d1da5317388ef84a16013302a5281d6f64e4a3f406aabc4"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5148bab4d71f58948c7c39d12b14a9005b6ab35a0bdf317a8ade9a9e4d9d0bd5"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4cf7837c3b11a2dfb589f8530b3cff2bd0307ace4c301e8997e95c7468c1378e"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c62b6fa2961a1dcc51ebe88771be5319a93fd89bd247c9ddf732bc250507bc2b"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb3922a7a804755bbe6b5be9b312e746137a03600f488290318936c1a2d4dc"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1234dc92d011d3554d929b6cf058ac4a24d188d97be5e04355f1b9223e98bbe9"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:06ad5543217e0e46fd7ab7ea45d506c76f878b87b1b4e369006bdb01acc05a83"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4fd72fab7bddce46c6826994ce1e7de145ae1e9e106ebb8eb9ce1393ca01444d"}, - {file = "orjson-3.9.10-cp310-none-win32.whl", hash = "sha256:b5b7d4a44cc0e6ff98da5d56cde794385bdd212a86563ac321ca64d7f80c80d1"}, - {file = 
"orjson-3.9.10-cp310-none-win_amd64.whl", hash = "sha256:61804231099214e2f84998316f3238c4c2c4aaec302df12b21a64d72e2a135c7"}, - {file = "orjson-3.9.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cff7570d492bcf4b64cc862a6e2fb77edd5e5748ad715f487628f102815165e9"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8bc367f725dfc5cabeed1ae079d00369900231fbb5a5280cf0736c30e2adf7"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c812312847867b6335cfb264772f2a7e85b3b502d3a6b0586aa35e1858528ab1"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edd2856611e5050004f4722922b7b1cd6268da34102667bd49d2a2b18bafb81"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:674eb520f02422546c40401f4efaf8207b5e29e420c17051cddf6c02783ff5ca"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0dc4310da8b5f6415949bd5ef937e60aeb0eb6b16f95041b5e43e6200821fb"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e99c625b8c95d7741fe057585176b1b8783d46ed4b8932cf98ee145c4facf499"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f18f96b47299c11203edfbdc34e1b69085070d9a3d1f302810cc23ad36bf3"}, - {file = "orjson-3.9.10-cp311-none-win32.whl", hash = "sha256:ce0a29c28dfb8eccd0f16219360530bc3cfdf6bf70ca384dacd36e6c650ef8e8"}, - {file = "orjson-3.9.10-cp311-none-win_amd64.whl", hash = "sha256:cf80b550092cc480a0cbd0750e8189247ff45457e5a023305f7ef1bcec811616"}, - {file = "orjson-3.9.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:602a8001bdf60e1a7d544be29c82560a7b49319a0b31d62586548835bbe2c862"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:f295efcd47b6124b01255d1491f9e46f17ef40d3d7eabf7364099e463fb45f0f"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92af0d00091e744587221e79f68d617b432425a7e59328ca4c496f774a356071"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5a02360e73e7208a872bf65a7554c9f15df5fe063dc047f79738998b0506a14"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:858379cbb08d84fe7583231077d9a36a1a20eb72f8c9076a45df8b083724ad1d"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666c6fdcaac1f13eb982b649e1c311c08d7097cbda24f32612dae43648d8db8d"}, - {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3fb205ab52a2e30354640780ce4587157a9563a68c9beaf52153e1cea9aa0921"}, - {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7ec960b1b942ee3c69323b8721df2a3ce28ff40e7ca47873ae35bfafeb4555ca"}, - {file = "orjson-3.9.10-cp312-none-win_amd64.whl", hash = "sha256:3e892621434392199efb54e69edfff9f699f6cc36dd9553c5bf796058b14b20d"}, - {file = "orjson-3.9.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8b9ba0ccd5a7f4219e67fbbe25e6b4a46ceef783c42af7dbc1da548eb28b6531"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e2ecd1d349e62e3960695214f40939bbfdcaeaaa62ccc638f8e651cf0970e5f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f433be3b3f4c66016d5a20e5b4444ef833a1f802ced13a2d852c637f69729c1"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4689270c35d4bb3102e103ac43c3f0b76b169760aff8bcf2d401a3e0e58cdb7f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:4bd176f528a8151a6efc5359b853ba3cc0e82d4cd1fab9c1300c5d957dc8f48c"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a2ce5ea4f71681623f04e2b7dadede3c7435dfb5e5e2d1d0ec25b35530e277b"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:49f8ad582da6e8d2cf663c4ba5bf9f83cc052570a3a767487fec6af839b0e777"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a11b4b1a8415f105d989876a19b173f6cdc89ca13855ccc67c18efbd7cbd1f8"}, - {file = "orjson-3.9.10-cp38-none-win32.whl", hash = "sha256:a353bf1f565ed27ba71a419b2cd3db9d6151da426b61b289b6ba1422a702e643"}, - {file = "orjson-3.9.10-cp38-none-win_amd64.whl", hash = "sha256:e28a50b5be854e18d54f75ef1bb13e1abf4bc650ab9d635e4258c58e71eb6ad5"}, - {file = "orjson-3.9.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ee5926746232f627a3be1cc175b2cfad24d0170d520361f4ce3fa2fd83f09e1d"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a73160e823151f33cdc05fe2cea557c5ef12fdf276ce29bb4f1c571c8368a60"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c338ed69ad0b8f8f8920c13f529889fe0771abbb46550013e3c3d01e5174deef"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5869e8e130e99687d9e4be835116c4ebd83ca92e52e55810962446d841aba8de"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2c1e559d96a7f94a4f581e2a32d6d610df5840881a8cba8f25e446f4d792df3"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a3a3a72c9811b56adf8bcc829b010163bb2fc308877e50e9910c9357e78521"}, - {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7f8fb7f5ecf4f6355683ac6881fd64b5bb2b8a60e3ccde6ff799e48791d8f864"}, - {file = 
"orjson-3.9.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c943b35ecdf7123b2d81d225397efddf0bce2e81db2f3ae633ead38e85cd5ade"}, - {file = "orjson-3.9.10-cp39-none-win32.whl", hash = "sha256:fb0b361d73f6b8eeceba47cd37070b5e6c9de5beaeaa63a1cb35c7e1a73ef088"}, - {file = "orjson-3.9.10-cp39-none-win_amd64.whl", hash = "sha256:b90f340cb6397ec7a854157fac03f0c82b744abdd1c0941a024c3c29d1340aff"}, - {file = "orjson-3.9.10.tar.gz", hash = "sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1"}, -] - -[[package]] -name = "pydantic" -version = "2.5.2" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pydantic-2.5.2-py3-none-any.whl", hash = "sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0"}, - {file = "pydantic-2.5.2.tar.gz", hash = "sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd"}, -] - -[package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.14.5" -typing-extensions = ">=4.6.1" - -[package.extras] -email = ["email-validator (>=2.0.0)"] - -[[package]] -name = "pydantic-core" -version = "2.14.5" -description = "" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pydantic_core-2.14.5-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd"}, - {file = "pydantic_core-2.14.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66"}, - {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997"}, - {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093"}, - {file = "pydantic_core-2.14.5-cp310-none-win32.whl", hash = "sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720"}, - {file = "pydantic_core-2.14.5-cp310-none-win_amd64.whl", hash = "sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b"}, - {file = "pydantic_core-2.14.5-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459"}, - {file = "pydantic_core-2.14.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4"}, - {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada"}, - {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda"}, - {file = "pydantic_core-2.14.5-cp311-none-win32.whl", hash = "sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651"}, - {file = "pydantic_core-2.14.5-cp311-none-win_amd64.whl", hash = "sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077"}, - {file = "pydantic_core-2.14.5-cp311-none-win_arm64.whl", hash = "sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf"}, - {file = "pydantic_core-2.14.5-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093"}, - {file = "pydantic_core-2.14.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189"}, - {file = 
"pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e"}, - {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69"}, - {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d"}, - {file = "pydantic_core-2.14.5-cp312-none-win32.whl", hash = "sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260"}, - {file = "pydantic_core-2.14.5-cp312-none-win_amd64.whl", hash = "sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36"}, - {file = "pydantic_core-2.14.5-cp312-none-win_arm64.whl", hash = "sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325"}, - {file = "pydantic_core-2.14.5-cp37-none-win32.whl", hash = "sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405"}, - {file = "pydantic_core-2.14.5-cp37-none-win_amd64.whl", hash = "sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588"}, - {file = "pydantic_core-2.14.5-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf"}, - {file = "pydantic_core-2.14.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b"}, - {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec"}, - {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124"}, - {file = "pydantic_core-2.14.5-cp38-none-win32.whl", hash = "sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867"}, - {file = "pydantic_core-2.14.5-cp38-none-win_amd64.whl", hash = "sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d"}, - {file = "pydantic_core-2.14.5-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7"}, - {file = "pydantic_core-2.14.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2"}, - 
{file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955"}, - {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5"}, - {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209"}, - {file = "pydantic_core-2.14.5-cp39-none-win32.whl", hash = "sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6"}, - {file = "pydantic_core-2.14.5-cp39-none-win_amd64.whl", hash = "sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8"}, - {file = 
"pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520"}, - {file = 
"pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3"}, - {file = "pydantic_core-2.14.5.tar.gz", hash = "sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "ruff" -version = "0.1.7" -description = "An extremely fast Python linter and code formatter, written in Rust." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7f80496854fdc65b6659c271d2c26e90d4d401e6a4a31908e7e334fab4645aac"}, - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1ea109bdb23c2a4413f397ebd8ac32cb498bee234d4191ae1a310af760e5d287"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0c2de9dd9daf5e07624c24add25c3a490dbf74b0e9bca4145c632457b3b42a"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:69a4bed13bc1d5dabf3902522b5a2aadfebe28226c6269694283c3b0cecb45fd"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de02ca331f2143195a712983a57137c5ec0f10acc4aa81f7c1f86519e52b92a1"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45b38c3f8788a65e6a2cab02e0f7adfa88872696839d9882c13b7e2f35d64c5f"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c64cb67b2025b1ac6d58e5ffca8f7b3f7fd921f35e78198411237e4f0db8e73"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dcc6bb2f4df59cb5b4b40ff14be7d57012179d69c6565c1da0d1f013d29951b"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2bb4bb6bbe921f6b4f5b6fdd8d8468c940731cb9406f274ae8c5ed7a78c478"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:276a89bcb149b3d8c1b11d91aa81898fe698900ed553a08129b38d9d6570e717"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:90c958fe950735041f1c80d21b42184f1072cc3975d05e736e8d66fc377119ea"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6b05e3b123f93bb4146a761b7a7d57af8cb7384ccb2502d29d736eaade0db519"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_x86_64.whl", hash 
= "sha256:290ecab680dce94affebefe0bbca2322a6277e83d4f29234627e0f8f6b4fa9ce"}, - {file = "ruff-0.1.7-py3-none-win32.whl", hash = "sha256:416dfd0bd45d1a2baa3b1b07b1b9758e7d993c256d3e51dc6e03a5e7901c7d80"}, - {file = "ruff-0.1.7-py3-none-win_amd64.whl", hash = "sha256:4af95fd1d3b001fc41325064336db36e3d27d2004cdb6d21fd617d45a172dd96"}, - {file = "ruff-0.1.7-py3-none-win_arm64.whl", hash = "sha256:0683b7bfbb95e6df3c7c04fe9d78f631f8e8ba4868dfc932d43d690698057e2e"}, - {file = "ruff-0.1.7.tar.gz", hash = "sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "types-requests" -version = "2.31.0.10" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.7" -files = [ - {file = "types-requests-2.31.0.10.tar.gz", hash = "sha256:dc5852a76f1eaf60eafa81a2e50aefa3d1f015c34cf0cba130930866b1b22a92"}, - {file = "types_requests-2.31.0.10-py3-none-any.whl", hash = "sha256:b32b9a86beffa876c0c3ac99a4cd3b8b51e973fb8e3bd4e0a6bb32c7efad80fc"}, -] - -[package.dependencies] -urllib3 = ">=2" - -[[package]] -name = "typing-extensions" -version = "4.8.0" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, -] - -[[package]] -name = "urllib3" -version = "2.1.0" -description = "HTTP library with thread-safe 
connection pooling, file post, and more." -optional = false -python-versions = ">=3.8" -files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "yarl" -version = "1.9.4" -description = "Yet another URL library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, - {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", 
hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, - {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, - {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, - {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, - {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, - {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, - {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, - {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, - {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, - {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, - 
{file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, - {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, - {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, - {file = 
"yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, - {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, - {file = 
"yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, - {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, - {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - -[metadata] -lock-version = "2.0" -python-versions = "^3.8" -content-hash = "3b915978f26bcf97d5ad13b1a38f7e344b7a960c1a21b5e253a0c5a6d9b7a879" diff --git a/py.typed b/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/pylintrc b/pylintrc new file mode 100644 index 00000000..d1653ae1 --- /dev/null +++ b/pylintrc @@ -0,0 +1,663 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. 
+extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths=^src/mistralai/extra/.*$, + ^src/mistralai/conversations.py + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. 
Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +# Note: suggestion-mode was removed in pylint 3.0 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. 
If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. 
+good-names=i, + j, + k, + ex, + Run, + _, + e, + n, + id + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. 
+variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). 
+min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. 
+known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". 
+disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. 
When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. 
+spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object,input,dir + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). 
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/pyproject.toml b/pyproject.toml index 89e68b2d..56d23bb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,43 +1,129 @@ -[tool.poetry] +[project] name = "mistralai" -version = "0.0.1" -description = "" -authors = ["Bam4d "] +version = "2.0.0rc1" +description = "Python Client SDK for the Mistral AI API." +authors = [{ name = "Mistral" }] +requires-python = ">=3.10" readme = "README.md" +dependencies = [ + "eval-type-backport >=0.2.0", + "httpx >=0.28.1", + "pydantic >=2.11.2", + "python-dateutil >=2.8.2", + "typing-inspection >=0.4.0", + "pyyaml (>=6.0.2,<7.0.0)", + "invoke (>=2.2.0,<3.0.0)", + "opentelemetry-sdk (>=1.33.1,<2.0.0)", + "opentelemetry-api (>=1.33.1,<2.0.0)", + "opentelemetry-exporter-otlp-proto-http (>=1.37.0,<2.0.0)", + "opentelemetry-semantic-conventions (>=0.59b0,<0.61)", +] -[tool.ruff] -select = ["E", "F", "W", "Q", "I"] -ignore = ["E203"] -fixable = ["ALL"] -unfixable = [] -line-length = 120 +[project.optional-dependencies] +gcp = [ + "google-auth >=2.27.0", + "requests >=2.32.3", +] +agents = [ + "mcp >=1.0,<2.0", + "griffe >=1.7.3,<2.0", + "authlib >=1.5.2,<2.0", +] +realtime = [ + "websockets >=13.0", +] +[project.urls] +Repository = "https://github.com/mistralai/client-python.git" -[tool.mypy] -disallow_untyped_defs = true -show_error_codes = true -no_implicit_optional = true -warn_return_any = true -warn_unused_ignores = true -exclude = ["docs", "tests", "examples", "tools", "build"] +[dependency-groups] +dev = [ + "mypy==1.15.0", + 
"pylint==3.2.3", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", + "types-python-dateutil>=2.9.0.20240316,<3", + "types-authlib>=1.5.0.20250516,<2", + "types-pyyaml>=6.0.12.20250516,<7", + "mcp>=1.0,<2", + "griffe>=1.7.3,<2", + "authlib>=1.5.2,<2", + "websockets >=13.0", +] +lint = [ + "ruff>=0.11.10,<0.12", + "pyright>=1.1.401,<2", + "mypy==1.15.0", +] + +[tool.uv] +default-groups = [ + "dev", + "lint", +] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/client/py.typed"] + +[tool.hatch.build] +dev-mode-dirs = [ + "src", + "packages/azure/src", + "packages/gcp/src", +] +[tool.hatch.build.targets.sdist] +include = [ + "src/mistralai", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", +] -[tool.poetry.dependencies] -python = "^3.8" -aiohttp = "^3.9.1" -backoff = "^2.2.1" -orjson = "^3.9.10" -requests = "^2.31.0" -pydantic = "^2.5.2" +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai/client/py.typed" = "src/mistralai/client/py.typed" +[tool.hatch.build.targets.wheel] +include = [ + "src/mistralai", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", +] -[tool.poetry.group.dev.dependencies] -ruff = "^0.1.6" -mypy = "^1.7.1" -types-requests = "^2.31.0.10" +[tool.hatch.build.targets.wheel.sources] +"src/mistralai" = "mistralai" +"packages/azure/src/mistralai/azure" = "mistralai/azure" +"packages/gcp/src/mistralai/gcp" = "mistralai/gcp" [build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.pytest.ini_options] +pythonpath = ["src", "packages/azure/src", "packages/gcp/src"] + +[tool.mypy] +disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src:packages/azure/src:packages/gcp/src" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true +[[tool.mypy.overrides]] +module = [ + "jsonpath.*", + 
"typing_inspect.*", + "authlib.*", + "websockets.*", + "mcp.*", + "griffe.*", + "google.*" +] +ignore_missing_imports = true +[tool.pyright] +venvPath = "." +venv = ".venv" diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh new file mode 100755 index 00000000..4baa3d88 --- /dev/null +++ b/scripts/lint_custom_code.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +ERRORS=0 + +echo "Checking PEP 420 namespace integrity..." +if [ -f src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (core)" +fi +if [ -f packages/azure/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/azure/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (azure)" +fi +if [ -f packages/gcp/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/gcp/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (gcp)" +fi + +echo "Running mypy..." 
+echo "-> running on examples" +uv run mypy examples/ \ + --exclude 'audio/' || ERRORS=1 +echo "-> running on extra" +uv run mypy src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" +uv run mypy src/mistralai/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run mypy packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run mypy packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run mypy packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run mypy packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 +echo "-> running on scripts" +uv run mypy scripts/ || ERRORS=1 + +echo "Running pyright..." +# TODO: Uncomment once the examples are fixed +# uv run pyright examples/ || ERRORS=1 +echo "-> running on extra" +uv run pyright src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" +uv run pyright src/mistralai/client/_hooks/ || ERRORS=1 +echo "-> running on azure hooks" +uv run pyright packages/azure/src/mistralai/azure/client/_hooks/ || ERRORS=1 +echo "-> running on azure sdk" +uv run pyright packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run pyright packages/gcp/src/mistralai/gcp/client/_hooks/ || ERRORS=1 +echo "-> running on gcp sdk" +uv run pyright packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 +echo "-> running on scripts" +uv run pyright scripts/ || ERRORS=1 + +echo "Running ruff..." 
+echo "-> running on examples" +uv run ruff check examples/ || ERRORS=1 +echo "-> running on extra" +uv run ruff check src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" +uv run ruff check src/mistralai/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run ruff check packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run ruff check packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run ruff check packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run ruff check packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 +echo "-> running on scripts" +uv run ruff check scripts/ || ERRORS=1 + +if [ "$ERRORS" -ne 0 ]; then + echo "❌ One or more linters failed" + exit 1 +else + echo "✅ All linters passed" +fi diff --git a/scripts/prepare_readme.py b/scripts/prepare_readme.py new file mode 100644 index 00000000..c220a055 --- /dev/null +++ b/scripts/prepare_readme.py @@ -0,0 +1,107 @@ +import argparse +import re +import subprocess +import sys +from pathlib import Path + +DEFAULT_REPO_URL = "https://github.com/mistralai/client-python.git" +DEFAULT_BRANCH = "main" +LINK_PATTERN = re.compile(r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))") + + +def build_base_url(repo_url: str, branch: str, repo_subdir: str) -> str: + """Build the GitHub base URL used to rewrite relative README links.""" + normalized_repo_url = repo_url[:-4] if repo_url.endswith(".git") else repo_url + normalized_subdir = repo_subdir.strip("/") + if normalized_subdir: + normalized_subdir = f"{normalized_subdir}/" + return f"{normalized_repo_url}/blob/{branch}/{normalized_subdir}" + + +def rewrite_relative_links(contents: 
str, base_url: str) -> str: + """Rewrite Markdown relative links to absolute GitHub URLs.""" + return LINK_PATTERN.sub( + lambda match: f"{match.group(1)}{base_url}{match.group(2)}{match.group(3)}", + contents, + ) + + +def run_with_rewritten_readme( + readme_path: Path, base_url: str, command: list[str] +) -> int: + """Rewrite README links, run a command, and restore the original README.""" + original_contents = readme_path.read_text(encoding="utf-8") + rewritten_contents = rewrite_relative_links(original_contents, base_url) + readme_path.write_text(rewritten_contents, encoding="utf-8") + try: + if not command: + return 0 + result = subprocess.run(command, check=False) + return result.returncode + finally: + readme_path.write_text(original_contents, encoding="utf-8") + + +def parse_args(argv: list[str]) -> argparse.Namespace: + """Parse command-line arguments for README rewriting.""" + parser = argparse.ArgumentParser( + description=( + "Rewrite README links to absolute GitHub URLs while running a command." + ) + ) + parser.add_argument( + "--readme", + type=Path, + default=Path("README.md"), + help="Path to the README file to rewrite.", + ) + parser.add_argument( + "--repo-url", + default=DEFAULT_REPO_URL, + help="Repository URL used to build absolute links.", + ) + parser.add_argument( + "--branch", + default=DEFAULT_BRANCH, + help="Repository branch used for absolute links.", + ) + parser.add_argument( + "--repo-subdir", + default="", + help="Repository subdirectory that contains the README.", + ) + parser.add_argument( + "command", + nargs=argparse.REMAINDER, + help=( + "Command to run (prefix with -- to stop option parsing). " + "If omitted, the rewritten README is printed to stdout." 
+ ), + ) + return parser.parse_args(argv) + + +def main(argv: list[str]) -> int: + """Entry point for rewriting README links during build commands.""" + args = parse_args(argv) + readme_path = args.readme + if not readme_path.is_file(): + raise FileNotFoundError(f"README file not found: {readme_path}") + base_url = build_base_url(args.repo_url, args.branch, args.repo_subdir) + command = ( + args.command[1:] + if args.command and args.command[0] == "--" + else args.command + ) + if not command: + rewritten_contents = rewrite_relative_links( + readme_path.read_text(encoding="utf-8"), + base_url, + ) + sys.stdout.write(rewritten_contents) + return 0 + return run_with_rewritten_readme(readme_path, base_url, command) + + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/scripts/publish.sh b/scripts/publish.sh new file mode 100755 index 00000000..c35748f3 --- /dev/null +++ b/scripts/publish.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +uv run python scripts/prepare_readme.py + +uv build +uv publish --token $PYPI_TOKEN diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh new file mode 100755 index 00000000..eca854b4 --- /dev/null +++ b/scripts/run_examples.sh @@ -0,0 +1,128 @@ +#!/bin/bash + +# Defaults +RETRY_COUNT=3 +NO_EXTRA_DEP=false + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --no-extra-dep) + NO_EXTRA_DEP=true + shift + ;; + --retry-count) + RETRY_COUNT="$2" + shift 2 + ;; + --help) + echo "Usage: $0 [--no-extra-dep] [--retry-count N]" + echo " --no-extra-dep: Exclude files that require extra dependencies" + echo " --retry-count N: Number of retries for each test (default: 3)" + echo " --help: Show this help message" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +# List of files to always exclude +exclude_files=( + "examples/mistral/chat/chatbot_with_streaming.py" + "examples/mistral/agents/async_conversation_run_mcp_remote_auth.py" + 
"examples/mistral/jobs/async_fine_tuning.py" + "examples/mistral/jobs/async_fine_tuning_chat.py" + "examples/mistral/jobs/fine_tuning.py" + "examples/mistral/jobs/fine_tuning_dry_run.py" + "examples/mistral/jobs/async_jobs_ocr_batch_annotation.py" + "examples/mistral/classifier/async_classifier.py" + "examples/mistral/mcp_servers/sse_server.py" + "examples/mistral/mcp_servers/stdio_server.py" + "examples/mistral/agents/async_conversation_run_code_interpreter.py" + "examples/mistral/agents/async_conversation_run_stream.py" + "examples/mistral/agents/async_conversation_run_mcp.py" + "examples/mistral/agents/async_conversation_run_mcp_remote.py" + "examples/mistral/audio/async_realtime_transcription_microphone.py" + "examples/mistral/audio/async_realtime_transcription_stream.py" + "examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py" +) + +# Files that require extra dependencies (agents, mcp, audio, etc.) +extra_dep_files=( + "examples/mistral/agents/" + "examples/mistral/mcp_servers/" + "examples/mistral/audio/" +) + +if [ "$NO_EXTRA_DEP" = true ]; then + for pattern in "${extra_dep_files[@]}"; do + for f in ${pattern}*.py; do + [ -f "$f" ] && exclude_files+=("$f") + done + done +fi + +failed=0 + +echo "Skipping scripts" +for file in "${exclude_files[@]}"; do + echo "$file" +done + +# Function to run a test with retries +run_test_with_retries() { + local file="$1" + local attempt=1 + local error_outputs=() + + while [ $attempt -le $RETRY_COUNT ]; do + echo "Running $file (attempt $attempt/$RETRY_COUNT)" + + # Run the script and capture both exit status and error output + current_output=$(python3 "$file" 2>&1) + exit_code=$? + + if [ $exit_code -eq 0 ]; then + echo "Success" + return 0 + else + # Store the error output from this attempt + error_outputs+=("Attempt $attempt: $current_output") + + if [ $attempt -lt $RETRY_COUNT ]; then + echo "Failed (attempt $attempt/$RETRY_COUNT), retrying..." 
+ sleep 1 # Brief pause before retry + else + echo "Failed after $RETRY_COUNT attempts" + echo "Error outputs from all attempts:" + for error_output in "${error_outputs[@]}"; do + echo "$error_output" + echo "---" + done + return 1 + fi + fi + + attempt=$((attempt + 1)) + done +} + +for file in examples/mistral/**/*.py; do + # Check if the file is not in the exclude list + if [ -f "$file" ] && [[ ! " ${exclude_files[@]} " =~ " $file " ]]; then + if ! run_test_with_retries "$file"; then + failed=1 + fi + else + echo "Skipped $file" + fi +done + +# If one of the example scripts failed, then exit +if [ $failed -ne 0 ]; then + exit 1 +fi diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py deleted file mode 100644 index c0dffc62..00000000 --- a/src/mistralai/async_client.py +++ /dev/null @@ -1,342 +0,0 @@ -import asyncio -import logging -import os -import posixpath -import time -from collections import defaultdict -from json import JSONDecodeError -from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, List, Optional, Union - -import aiohttp -import backoff -import orjson - -from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES -from mistralai.exceptions import ( - MistralAPIException, - MistralConnectionException, - MistralException, -) -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ChatMessage, -) -from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelList - - -class AIOHTTPBackend: - """HTTP backend which handles retries, concurrency limiting and logging""" - - SLEEP_AFTER_FAILURE = defaultdict(lambda: 0.25, {429: 5.0}) - - _requester: Callable[..., Awaitable[aiohttp.ClientResponse]] - _semaphore: asyncio.Semaphore - _session: Optional[aiohttp.ClientSession] - - def __init__( - self, - max_concurrent_requests: int = 64, - max_retries: int = 5, - timeout: int = 120, - ): - 
self._logger = logging.getLogger(__name__) - self._timeout = timeout - self._max_retries = max_retries - self._session = None - self._max_concurrent_requests = max_concurrent_requests - - def build_aio_requester( - self, - ) -> Callable: # returns a function for retryable requests - @backoff.on_exception( - backoff.expo, - (aiohttp.ClientError, aiohttp.ClientResponseError), - max_tries=self._max_retries + 1, - max_time=self._timeout, - ) - async def make_request_fn( - session: aiohttp.ClientSession, *args: Any, **kwargs: Any - ) -> aiohttp.ClientResponse: - async with self._semaphore: # this limits total concurrency by the client - response = await session.request(*args, **kwargs) - if ( - response.status in RETRY_STATUS_CODES - ): # likely temporary, raise to retry - self._logger.info(f"Received status {response.status}, retrying...") - await asyncio.sleep(self.SLEEP_AFTER_FAILURE[response.status]) - response.raise_for_status() - - return response - - return make_request_fn - - async def request( - self, - url: str, - json: Optional[Dict[str, Any]] = None, - method: str = "post", - headers: Optional[Dict[str, Any]] = None, - session: Optional[aiohttp.ClientSession] = None, - params: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> aiohttp.ClientResponse: - session = session or await self.session() - self._logger.debug(f"Making request to {url} with content {json}") - - request_start = time.time() - try: - response = await self._requester( - session, - method, - url, - headers=headers, - json=json, - params=params, - **kwargs, - ) - except ( - aiohttp.ClientConnectionError - ) as e: # ensure the SDK user does not have to deal with knowing aiohttp - self._logger.debug( - f"Fatal connection error after {time.time()-request_start:.1f}s: {e}" - ) - raise MistralConnectionException(str(e)) from e - except ( - aiohttp.ClientResponseError - ) as e: # status 500 or something remains after retries - self._logger.debug( - f"Fatal ClientResponseError error after 
{time.time()-request_start:.1f}s: {e}" - ) - raise MistralConnectionException(str(e)) from e - except asyncio.TimeoutError as e: - self._logger.debug( - f"Fatal timeout error after {time.time()-request_start:.1f}s: {e}" - ) - raise MistralConnectionException("The request timed out") from e - except Exception as e: # Anything caught here should be added above - self._logger.debug( - f"Unexpected fatal error after {time.time()-request_start:.1f}s: {e}" - ) - raise MistralException( - f"Unexpected exception ({e.__class__.__name__}): {e}" - ) from e - - self._logger.debug( - f"Received response with status {response.status} after {time.time()-request_start:.1f}s" - ) - return response - - async def session(self) -> aiohttp.ClientSession: - if self._session is None: - self._session = aiohttp.ClientSession( - timeout=aiohttp.ClientTimeout(self._timeout), - connector=aiohttp.TCPConnector(limit=0), - ) - self._semaphore = asyncio.Semaphore(self._max_concurrent_requests) - self._requester = self.build_aio_requester() - return self._session - - async def close(self) -> None: - if self._session is not None: - await self._session.close() - self._session = None - - def __del__(self) -> None: - # https://stackoverflow.com/questions/54770360/how-can-i-wait-for-an-objects-del-to-finish-before-the-async-loop-closes - if self._session: - try: - loop = asyncio.get_event_loop() - if loop.is_running(): - loop.create_task(self.close()) - else: - loop.run_until_complete(self.close()) - except Exception: - pass - - -class MistralAsyncClient(ClientBase): - def __init__( - self, - api_key: Optional[str] = os.environ.get("MISTRAL_API_KEY", None), - endpoint: str = ENDPOINT, - max_retries: int = 5, - timeout: int = 120, - max_concurrent_requests: int = 64, - ): - super().__init__(endpoint, api_key, max_retries, timeout) - - self._backend = AIOHTTPBackend( - max_concurrent_requests=max_concurrent_requests, - max_retries=max_retries, - timeout=timeout, 
- ) - - async def close(self) -> None: - await self._backend.close() - - async def _request( - self, - method: str, - json: Dict[str, Any], - path: str, - stream: bool = False, - params: Optional[Dict[str, Any]] = None, - ) -> Union[Dict[str, Any], aiohttp.ClientResponse]: - - headers = { - "Authorization": f"Bearer {self._api_key}", - "Content-Type": "application/json", - } - - url = posixpath.join(self._endpoint, path) - - response = await self._backend.request( - url, json, method, headers, params=params - ) - if stream: - return response - - try: - json_response: Dict[str, Any] = await response.json() - except JSONDecodeError: - raise MistralAPIException.from_aio_response( - response, message=f"Failed to decode json body: {await response.text()}" - ) - except aiohttp.ClientPayloadError as e: - raise MistralAPIException.from_aio_response( - response, - message=f"An unexpected error occurred while receiving the response: {e}", - ) - - self._logger.debug(f"JSON response: {json_response}") - self._check_response(json_response, dict(response.headers), response.status) - return json_response - - async def chat( - self, - model: str, - messages: List[ChatMessage], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - ) -> ChatCompletionResponse: - """ A asynchronous chat endpoint that returns a single response. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. 
- Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. - - Returns: - ChatCompletionResponse: a response object containing the generated text. - """ - request = self._make_chat_request( - model, - messages, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=False, - safe_mode=safe_mode, - ) - - response = await self._request("post", request, "v1/chat/completions") - assert isinstance(response, dict), "Bad response from _request" - return ChatCompletionResponse(**response) - - async def chat_stream( - self, - model: str, - messages: List[ChatMessage], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: - """ An Asynchronous chat endpoint that streams responses. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. - - Returns: - AsyncGenerator[ChatCompletionStreamResponse, None]: - An async generator that yields ChatCompletionStreamResponse objects. 
- """ - - request = self._make_chat_request( - model, - messages, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=True, - safe_mode=safe_mode, - ) - async_response = await self._request( - "post", request, "v1/chat/completions", stream=True - ) - - assert isinstance( - async_response, aiohttp.ClientResponse - ), "Bad response from _request" - - async with async_response as response: - async for line in response.content: - if line == b"\n": - continue - - if line.startswith(b"data: "): - line = line[6:].strip() - if line != b"[DONE]": - json_response = orjson.loads(line) - yield ChatCompletionStreamResponse(**json_response) - - async def embeddings( - self, model: str, input: Union[str, List[str]] - ) -> EmbeddingResponse: - """An asynchronous embeddings endpoint that returns embeddings for a single, or batch of inputs - - Args: - model (str): The embedding model to use, e.g. mistral-embed - input (Union[str, List[str]]): The input to embed, - e.g. ['What is the best French cheese?'] - - Returns: - EmbeddingResponse: A response object containing the embeddings. - """ - request = {"model": model, "input": input} - response = await self._request("post", request, "v1/embeddings") - assert isinstance(response, dict), "Bad response from _request" - return EmbeddingResponse(**response) - - async def list_models(self) -> ModelList: - """Returns a list of the available models - - Returns: - ModelList: A response object containing the list of models. 
- """ - response = await self._request("get", {}, "v1/models") - assert isinstance(response, dict), "Bad response from _request" - return ModelList(**response) diff --git a/src/mistralai/client.py b/src/mistralai/client.py deleted file mode 100644 index 99ea8cf3..00000000 --- a/src/mistralai/client.py +++ /dev/null @@ -1,221 +0,0 @@ -import os -import posixpath -from json import JSONDecodeError -from typing import Any, Dict, Iterable, List, Optional, Union - -import orjson -import requests -from requests import Response -from requests.adapters import HTTPAdapter -from urllib3.util.retry import Retry - -from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES -from mistralai.exceptions import ( - MistralAPIException, - MistralConnectionException, - MistralException, -) -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ChatMessage, -) -from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelList - - -class MistralClient(ClientBase): - """ - Synchronous wrapper around the async client - """ - - def __init__( - self, - api_key: Optional[str] = os.environ.get("MISTRAL_API_KEY", None), - endpoint: str = ENDPOINT, - max_retries: int = 5, - timeout: int = 120, - ): - super().__init__(endpoint, api_key, max_retries, timeout) - - def _request( - self, - method: str, - json: Dict[str, Any], - path: str, - stream: bool = False, - params: Optional[Dict[str, Any]] = None, - ) -> Union[Response, Dict[str, Any]]: - headers = { - "Authorization": f"Bearer {self._api_key}", - "Content-Type": "application/json", - } - - url = posixpath.join(self._endpoint, path) - - with requests.Session() as session: - retries = Retry( - total=self._max_retries, - backoff_factor=0.5, - allowed_methods=["POST", "GET"], - status_forcelist=RETRY_STATUS_CODES, - raise_on_status=False, - ) - 
session.mount("https://", HTTPAdapter(max_retries=retries)) - session.mount("https://", HTTPAdapter(max_retries=retries)) - - if stream: - return session.request( - method, url, headers=headers, json=json, stream=True - ) - - try: - response = session.request( - method, - url, - headers=headers, - json=json, - timeout=self._timeout, - params=params, - ) - except requests.exceptions.ConnectionError as e: - raise MistralConnectionException(str(e)) from e - except requests.exceptions.RequestException as e: - raise MistralException( - f"Unexpected exception ({e.__class__.__name__}): {e}" - ) from e - - try: - json_response: Dict[str, Any] = response.json() - except JSONDecodeError: - raise MistralAPIException.from_response( - response, message=f"Failed to decode json body: {response.text}" - ) - - self._check_response( - json_response, dict(response.headers), response.status_code - ) - return json_response - - def chat( - self, - model: str, - messages: List[ChatMessage], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - ) -> ChatCompletionResponse: - """ A chat endpoint that returns a single response. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. 
- safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. - - Returns: - ChatCompletionResponse: a response object containing the generated text. - """ - request = self._make_chat_request( - model, - messages, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=False, - safe_mode=safe_mode, - ) - - response = self._request("post", request, "v1/chat/completions") - - assert isinstance(response, dict), "Bad response from _request" - - return ChatCompletionResponse(**response) - - def chat_stream( - self, - model: str, - messages: List[ChatMessage], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - ) -> Iterable[ChatCompletionStreamResponse]: - """ A chat endpoint that streams responses. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. - - Returns: - Iterable[ChatCompletionStreamResponse]: - A generator that yields ChatCompletionStreamResponse objects. 
- """ - request = self._make_chat_request( - model, - messages, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=True, - safe_mode=safe_mode, - ) - - response = self._request("post", request, "v1/chat/completions", stream=True) - - assert isinstance(response, Response), "Bad response from _request" - - for line in response.iter_lines(): - self._logger.debug(f"Received line: {line}") - if line == b"\n": - continue - - if line.startswith(b"data: "): - line = line[6:].strip() - if line != b"[DONE]": - json_response = orjson.loads(line) - yield ChatCompletionStreamResponse(**json_response) - - def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse: - """An embeddings endpoint that returns embeddings for a single, or batch of inputs - - Args: - model (str): The embedding model to use, e.g. mistral-embed - input (Union[str, List[str]]): The input to embed, - e.g. ['What is the best French cheese?'] - - Returns: - EmbeddingResponse: A response object containing the embeddings. - """ - request = {"model": model, "input": input} - response = self._request("post", request, "v1/embeddings") - assert isinstance(response, dict), "Bad response from _request" - return EmbeddingResponse(**response) - - def list_models(self) -> ModelList: - """Returns a list of the available models - - Returns: - ModelList: A response object containing the list of models. - """ - response = self._request("get", {}, "v1/models") - assert isinstance(response, dict), "Bad response from _request" - return ModelList(**response) diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py new file mode 100644 index 00000000..4b79610a --- /dev/null +++ b/src/mistralai/client/__init__.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f1b791f9d2a5 + +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) +from .sdk import * +from .sdkconfiguration import * + + +VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/src/mistralai/client/_hooks/__init__.py b/src/mistralai/client/_hooks/__init__.py new file mode 100644 index 00000000..66a04e37 --- /dev/null +++ b/src/mistralai/client/_hooks/__init__.py @@ -0,0 +1,6 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cef9ff97efd7 + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/src/mistralai/client/_hooks/custom_user_agent.py b/src/mistralai/client/_hooks/custom_user_agent.py new file mode 100644 index 00000000..212f2172 --- /dev/null +++ b/src/mistralai/client/_hooks/custom_user_agent.py @@ -0,0 +1,22 @@ +# MAKE SURE YOU UPDATE THE COPIES OF THIS FILES IN THE PROVIDERS'S PACKAGES WHEN YOU MAKE CHANGES HERE +from typing import Union + +import httpx + +from .types import BeforeRequestContext, BeforeRequestHook + +PREFIX = "mistral-client-python/" + +class CustomUserAgentHook(BeforeRequestHook): + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + current = request.headers["user-agent"] + if current.startswith(PREFIX): + return request + + request.headers["user-agent"] = ( + PREFIX + current.split(" ")[1] + ) + + return request diff --git a/src/mistralai/client/_hooks/deprecation_warning.py b/src/mistralai/client/_hooks/deprecation_warning.py new file mode 100644 index 00000000..8de2968b --- /dev/null +++ b/src/mistralai/client/_hooks/deprecation_warning.py @@ -0,0 +1,26 @@ +import logging +from typing import Union + +import httpx + +from .types import 
AfterSuccessContext, AfterSuccessHook + +logger = logging.getLogger(__name__) + +HEADER_MODEL_DEPRECATION_TIMESTAMP = "x-model-deprecation-timestamp" + + +class DeprecationWarningHook(AfterSuccessHook): + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + if HEADER_MODEL_DEPRECATION_TIMESTAMP in response.headers: + model = response.json()["model"] + # pylint: disable=logging-fstring-interpolation + logger.warning( + "WARNING: The model %s is deprecated and will be removed on %s. Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.", + model, + response.headers[HEADER_MODEL_DEPRECATION_TIMESTAMP], + ) + return response diff --git a/src/mistralai/client/_hooks/registration.py b/src/mistralai/client/_hooks/registration.py new file mode 100644 index 00000000..58bebab0 --- /dev/null +++ b/src/mistralai/client/_hooks/registration.py @@ -0,0 +1,22 @@ +from .custom_user_agent import CustomUserAgentHook +from .deprecation_warning import DeprecationWarningHook +from .tracing import TracingHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. 
+ + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + tracing_hook = TracingHook() + hooks.register_before_request_hook(CustomUserAgentHook()) + hooks.register_after_success_hook(DeprecationWarningHook()) + hooks.register_after_success_hook(tracing_hook) + hooks.register_before_request_hook(tracing_hook) + hooks.register_after_error_hook(tracing_hook) diff --git a/src/mistralai/client/_hooks/sdkhooks.py b/src/mistralai/client/_hooks/sdkhooks.py new file mode 100644 index 00000000..ecf94240 --- /dev/null +++ b/src/mistralai/client/_hooks/sdkhooks.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ed1e485b2153 + +import httpx +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.client.httpclient import HttpClient + + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: 
AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/src/mistralai/client/_hooks/tracing.py b/src/mistralai/client/_hooks/tracing.py new file mode 100644 index 00000000..b353d9bd --- /dev/null +++ b/src/mistralai/client/_hooks/tracing.py @@ -0,0 +1,75 @@ +import logging +from typing import Optional, Tuple, Union + +import httpx +from opentelemetry.trace import Span + +from mistralai.extra.observability.otel import ( + get_or_create_otel_tracer, + get_response_and_error, + get_traced_request_and_span, + get_traced_response, +) +from .types import ( + AfterErrorContext, + AfterErrorHook, + AfterSuccessContext, + AfterSuccessHook, + BeforeRequestContext, + BeforeRequestHook, +) + +logger = logging.getLogger(__name__) + + +class TracingHook(BeforeRequestHook, AfterSuccessHook, AfterErrorHook): + def __init__(self) -> None: + 
self.tracing_enabled, self.tracer = get_or_create_otel_tracer() + self.request_span: Optional[Span] = None + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + # Refresh tracer/provider per request so tracing can be enabled if the + # application configures OpenTelemetry after the client is instantiated. + self.tracing_enabled, self.tracer = get_or_create_otel_tracer() + self.request_span = None + request, self.request_span = get_traced_request_and_span( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + request=request, + ) + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + response = get_traced_response( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + response=response, + ) + self.request_span = None + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + if response: + response, error = get_response_and_error( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + response=response, + error=error, + ) + self.request_span = None + return response, error diff --git a/src/mistralai/client/_hooks/types.py b/src/mistralai/client/_hooks/types.py new file mode 100644 index 00000000..036d44b8 --- /dev/null +++ b/src/mistralai/client/_hooks/types.py @@ -0,0 +1,114 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 85cfedfb7582 + +from abc import ABC, abstractmethod +import httpx +from mistralai.client.httpclient import HttpClient +from mistralai.client.sdkconfiguration import SDKConfiguration +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + config: SDKConfiguration + base_url: str + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__( + self, + config: SDKConfiguration, + base_url: str, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): + self.config = config + self.base_url = base_url + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> 
Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py new file mode 100644 index 00000000..805648e4 --- /dev/null +++ b/src/mistralai/client/_version.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cc807b30de19 + +import importlib.metadata + +__title__: str = "mistralai" +__version__: str = "2.0.0rc1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py new file mode 100644 index 00000000..0761b0bc --- /dev/null +++ b/src/mistralai/client/accesses.py @@ -0,0 +1,616 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 76fc53bfcf59 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional + + +class Accesses(BaseSDK): + r"""(beta) Libraries API - manage access to a library.""" + + def list( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListSharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", 
"*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListSharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update_or_create( + self, + *, + library_id: str, + level: models.ShareEnum, + share_with_uuid: str, + share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
+ + :param library_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="libraries_share_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_or_create_async( + self, + *, + library_id: str, + level: models.ShareEnum, + share_with_uuid: str, + share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. + + :param library_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. 
+ :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", 
"5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + share_with_uuid: str, + share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Delete an access level. + + Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. + + :param library_id: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise 
errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    async def delete_async(
+        self,
+        *,
+        library_id: str,
+        share_with_uuid: str,
+        share_with_type: models.EntityType,
+        org_id: OptionalNullable[str] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.SharingOut:
+        r"""Delete an access level.
+
+        Given a library id, you can delete the access level of an entity. An owner cannot delete its own access. You have to be the owner of the library to delete an access other than yours.
+
+        :param library_id:
+        :param share_with_uuid: The id of the entity (user, workspace or organization) to share with
+        :param share_with_type: The type of entity, used to share a library.
+        :param org_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, 
http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py new file mode 100644 index 00000000..2b70d152 --- /dev/null +++ b/src/mistralai/client/agents.py @@ -0,0 +1,686 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e946546e3eaa + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Agents(BaseSDK): + r"""Agents API.""" + + def complete( + self, + *, + messages: Union[ + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: 
Optional[ + Union[ + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Agents Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + messages: Union[ + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: 
OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Agents Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + messages: Union[ + List[models.AgentsCompletionStreamRequestMessage], + List[models.AgentsCompletionStreamRequestMessageTypedDict], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: 
Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionStreamRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, 
+ base_url=base_url or "", + operation_id="stream_agents", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + messages: Union[ + List[models.AgentsCompletionStreamRequestMessage], + List[models.AgentsCompletionStreamRequestMessageTypedDict], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], 
List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionStreamRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_agents", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py new file mode 100644 index 00000000..f68f063c --- /dev/null +++ b/src/mistralai/client/audio.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7a8ed2e90d61 + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.transcriptions import Transcriptions +from typing import Optional + +# region imports +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.extra.realtime import RealtimeTranscription +# endregion imports + + +class Audio(BaseSDK): + transcriptions: Transcriptions + r"""API for audio transcription.""" + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.transcriptions = Transcriptions( + self.sdk_configuration, parent_ref=self.parent_ref + ) + + # region sdk-class-body + @property + def realtime(self) -> "RealtimeTranscription": + """Returns a client for real-time audio transcription via WebSocket.""" + if not hasattr(self, "_realtime"): + from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel + + self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init + + return self._realtime + + # endregion sdk-class-body diff --git a/src/mistralai/client/basesdk.py b/src/mistralai/client/basesdk.py new file mode 100644 index 00000000..a976121b --- /dev/null +++ b/src/mistralai/client/basesdk.py @@ -0,0 +1,385 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7518c67b81ea + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai.client import errors, models, utils +from mistralai.client._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) +from mistralai.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, + run_sync_in_thread, +) +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse + + +class BaseSDK: + sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. + """ + + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: + self.sdk_configuration = sdk_config + self.parent_ref = parent_ref + + def _get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def _build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + 
user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self._get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if 
request_has_query_params else None, + allow_empty_value, + ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). + parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + security = utils.get_security_from_env(security, models.Security) + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody() + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = 
self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + + hooks = 
self.sdk_configuration.__dict__["_hooks"] + + async def do(): + http_res = None + try: + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None + ) + + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py new file mode 100644 index 00000000..7e36fd0d --- /dev/null +++ b/src/mistralai/client/batch.py @@ -0,0 +1,21 @@ +"""Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cffe114c7ac7 + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.batch_jobs import BatchJobs +from typing import Optional + + +class Batch(BaseSDK): + jobs: BatchJobs + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = BatchJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/batch_jobs.py b/src/mistralai/client/batch_jobs.py new file mode 100644 index 00000000..0e135b30 --- /dev/null +++ b/src/mistralai/client/batch_jobs.py @@ -0,0 +1,795 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3423fec25840 + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class BatchJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + 
timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListBatchJobsResponse: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param order_by: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + agent_id=agent_id, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + order_by=order_by, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListBatchJobsResponse: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. 
+ + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param order_by: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + agent_id=agent_id, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + order_by=order_by, + ) + + req = self._build_request_async( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + 
                ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + endpoint: models.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: + :param model: The model to be used for batch inference. + :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateBatchJobRequest( + input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), + endpoint=endpoint, + model=model, + agent_id=agent_id, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self._build_request( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateBatchJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + endpoint: models.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: + :param model: The model to be used for batch inference. + :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateBatchJobRequest( + input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), + endpoint=endpoint, + model=model, + agent_id=agent_id, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateBatchJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. + + :param job_id: + :param inline: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + inline=inline, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + 
job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. + + :param job_id: + :param inline: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + inline=inline, + ) + + req = self._build_request_async( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_job", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def cancel_async( + self, + *, + 
job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py new file mode 100644 index 00000000..65b761d1 --- /dev/null +++ b/src/mistralai/client/beta.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 981417f45147 + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.beta_agents import BetaAgents +from mistralai.client.conversations import Conversations +from mistralai.client.libraries import Libraries +from typing import Optional + + +class Beta(BaseSDK): + conversations: Conversations + r"""(beta) Conversations API""" + agents: BetaAgents + r"""(beta) Agents API""" + libraries: Libraries + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.conversations = Conversations( + self.sdk_configuration, parent_ref=self.parent_ref + ) + self.agents = BetaAgents(self.sdk_configuration, parent_ref=self.parent_ref) + self.libraries = Libraries(self.sdk_configuration, 
parent_ref=self.parent_ref) diff --git a/src/mistralai/client/beta_agents.py b/src/mistralai/client/beta_agents.py new file mode 100644 index 00000000..157c5de4 --- /dev/null +++ b/src/mistralai/client/beta_agents.py @@ -0,0 +1,2266 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b64ad29b7174 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class BetaAgents(BaseSDK): + r"""(beta) Agents API""" + + def create( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create a agent that can be used within a conversation. + + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. 
+ :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param metadata: + :param version_message: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.CreateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + version_message=version_message, + ) + + req = self._build_request( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateAgentRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", 
"500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create a agent that can be used within a conversation. 
+ + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param metadata: + :param version_message: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.CreateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + version_message=version_message, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, 
False, "json", models.CreateAgentRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: 
Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. + + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: Filter by agent name + :param search: Search agents by name or ID + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + search=search, + id=id, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. 
+ + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: Filter by agent name + :param search: Search agents by name or ID + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + search=search, + id=id, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security 
+ ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models.AgentsAPIV1AgentsGetAgentVersion, + models.AgentsAPIV1AgentsGetAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, 
"5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models.AgentsAPIV1AgentsGetAgentVersion, + models.AgentsAPIV1AgentsGetAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    def update(
+        self,
+        *,
+        agent_id: str,
+        instructions: OptionalNullable[str] = UNSET,
+        tools: Optional[
+            Union[
+                List[models.UpdateAgentRequestTool],
+                List[models.UpdateAgentRequestToolTypedDict],
+            ]
+        ] = None,
+        completion_args: Optional[
+            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
+        ] = None,
+        model: OptionalNullable[str] = UNSET,
+        name: OptionalNullable[str] = UNSET,
+        description: OptionalNullable[str] = UNSET,
+        handoffs: OptionalNullable[List[str]] = UNSET,
+        deployment_chat: OptionalNullable[bool] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
+        version_message: OptionalNullable[str] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.Agent:
+        r"""Update an agent entity.
+
+        Update an agent's attributes and create a new version.
+
+        :param agent_id:
+        :param instructions: Instruction prompt the model will follow during the conversation.
+        :param tools: List of tools which are available to the model during the conversation.
+        :param completion_args: White-listed arguments from the completion API
+        :param model:
+        :param name:
+        :param description:
+        :param handoffs:
+        :param deployment_chat:
+        :param metadata:
+        :param version_message:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + update_agent_request=models.UpdateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.UpdateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, + version_message=version_message, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_agent_request, + False, + False, + "json", + models.UpdateAgentRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, 
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.Agent, http_res)
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    async def update_async(
+        self,
+        *,
+        agent_id: str,
+        instructions: OptionalNullable[str] = UNSET,
+        tools: Optional[
+            Union[
+                List[models.UpdateAgentRequestTool],
+                List[models.UpdateAgentRequestToolTypedDict],
+            ]
+        ] = None,
+        completion_args: Optional[
+            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
+        ] = None,
+        model: OptionalNullable[str] = UNSET,
+        name: OptionalNullable[str] = UNSET,
+        description: OptionalNullable[str] = UNSET,
+        handoffs: OptionalNullable[List[str]] = UNSET,
+        deployment_chat: OptionalNullable[bool] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
+        version_message: OptionalNullable[str] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.Agent:
+        r"""Update an agent entity.
+
+        Update an agent's attributes and create a new version.
+
+        :param agent_id:
+        :param instructions: Instruction prompt the model will follow during the conversation.
+        :param tools: List of tools which are available to the model during the conversation.
+ :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param deployment_chat: + :param metadata: + :param version_message: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + update_agent_request=models.UpdateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.UpdateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, + version_message=version_message, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_agent_request, + False, + False, + "json", + models.UpdateAgentRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update_version( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_version_async( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", 
"503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list_versions( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. 
+ + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_versions_async( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. + + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error 
occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def get_version(
        self,
        *,
        agent_id: str,
        version: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Agent:
        r"""Retrieve a specific version of an agent.

        Get a specific agent version by version number.

        :param agent_id:
        :param version:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # NOTE(review): generated by Speakeasy — regenerate rather than hand-edit.
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsGetVersionRequest(
            agent_id=agent_id,
            version=version,
        )

        req = self._build_request(
            method="GET",
            path="/v1/agents/{agent_id}/versions/{version}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_get_version",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed model; 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.Agent, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def get_version_async(
        self,
        *,
        agent_id: str,
        version: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Agent:
        r"""Retrieve a specific version of an agent.

        Get a specific agent version by version number.

        :param agent_id:
        :param version:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of get_version; only the transport calls differ.
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsGetVersionRequest(
            agent_id=agent_id,
            version=version,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/agents/{agent_id}/versions/{version}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_get_version",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed model; 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.Agent, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def create_version_alias(
        self,
        *,
        agent_id: str,
        alias: str,
        version: int,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.AgentAliasResponse:
        r"""Create or update an agent version alias.

        Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions.

        :param agent_id:
        :param alias:
        :param version:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # PUT is idempotent here: an existing alias is re-pointed, a new one created.
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest(
            agent_id=agent_id,
            alias=alias,
            version=version,
        )

        req = self._build_request(
            method="PUT",
            path="/v1/agents/{agent_id}/aliases",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_create_or_update_alias",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed model; 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.AgentAliasResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def create_version_alias_async(
        self,
        *,
        agent_id: str,
        alias: str,
        version: int,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.AgentAliasResponse:
        r"""Create or update an agent version alias.

        Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions.

        :param agent_id:
        :param alias:
        :param version:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of create_version_alias; only the transport calls differ.
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest(
            agent_id=agent_id,
            alias=alias,
            version=version,
        )

        req = self._build_request_async(
            method="PUT",
            path="/v1/agents/{agent_id}/aliases",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_create_or_update_alias",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed model; 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.AgentAliasResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def list_version_aliases(
        self,
        *,
        agent_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> List[models.AgentAliasResponse]:
        r"""List all aliases for an agent.

        Retrieve all version aliases for a specific agent.

        :param agent_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # NOTE(review): generated by Speakeasy — regenerate rather than hand-edit.
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsListVersionAliasesRequest(
            agent_id=agent_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/agents/{agent_id}/aliases",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_list_version_aliases",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed list; 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(List[models.AgentAliasResponse], http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def list_version_aliases_async(
        self,
        *,
        agent_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> List[models.AgentAliasResponse]:
        r"""List all aliases for an agent.

        Retrieve all version aliases for a specific agent.

        :param agent_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of list_version_aliases; only the transport calls differ.
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsListVersionAliasesRequest(
            agent_id=agent_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/agents/{agent_id}/aliases",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_list_version_aliases",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed list; 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(List[models.AgentAliasResponse], http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def delete_version_alias(
        self,
        *,
        agent_id: str,
        alias: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ):
        r"""Delete an agent version alias.

        Delete an existing alias for an agent.

        :param agent_id:
        :param alias:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Returns None on success (HTTP 204 No Content).
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsDeleteAliasRequest(
            agent_id=agent_id,
            alias=alias,
        )

        req = self._build_request(
            method="DELETE",
            path="/v1/agents/{agent_id}/aliases",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_delete_alias",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 204 -> success (no body); 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "204", "*"):
            return
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def delete_version_alias_async(
        self,
        *,
        agent_id: str,
        alias: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ):
        r"""Delete an agent version alias.

        Delete an existing alias for an agent.

        :param agent_id:
        :param alias:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of delete_version_alias; returns None on HTTP 204.
        base_url = None
        url_variables = None
        # Per-call timeout overrides the SDK-wide default when provided.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsDeleteAliasRequest(
            agent_id=agent_id,
            alias=alias,
        )

        req = self._build_request_async(
            method="DELETE",
            path="/v1/agents/{agent_id}/aliases",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry policy: explicit per-call override, else the SDK-level default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_delete_alias",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 204 -> success (no body); 422 -> validation error; other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "204", "*"):
            return
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py
new file mode 100644
index 00000000..13b9c01f
--- /dev/null
+++ b/src/mistralai/client/chat.py
@@ -0,0 +1,807 @@
"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" +# @generated-id: 7eba0f088d47 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + +# region imports +from typing import Type + +from mistralai.extra.struct_chat import ( + ParsedChatCompletionResponse, + convert_to_parsed_chat_completion_response, +) +from mistralai.extra.utils.response_format import ( + CustomPydanticModel, + response_format_from_pydantic_model, +) +# endregion imports + + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + # region sdk-class-body + # Custom .parse methods for the Structure Outputs Feature. + + def parse( + self, response_format: Type[CustomPydanticModel], **kwargs: Any + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Parse the response using the provided response format. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete + json_response_format = response_format_from_pydantic_model(response_format) + # Run the inference + response = self.complete(**kwargs, response_format=json_response_format) + # Parse response back to the input pydantic model + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + async def parse_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Asynchronously parse the response using the provided response format. 
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.complete_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + def parse_stream( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStream[models.CompletionEvent]: + """ + Parse the response using the provided response format. + For now the response will be in JSON format not in the input Pydantic model. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = self.stream(**kwargs, response_format=json_response_format) + return response + + async def parse_stream_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + """ + Asynchronously parse the response using the provided response format. + For now the response will be in JSON format not in the input Pydantic model. 
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.stream_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + return response + + # endregion sdk-class-body + + def complete( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param model: ID of the model 
to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + 
    async def complete_async(
        self,
        *,
        model: str,
        messages: Union[
            List[models.ChatCompletionRequestMessage],
            List[models.ChatCompletionRequestMessageTypedDict],
        ],
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = None,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models.ChatCompletionRequestStop,
                models.ChatCompletionRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        response_format: Optional[
            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
        ] = None,
        tools: OptionalNullable[
            Union[List[models.Tool], List[models.ToolTypedDict]]
        ] = UNSET,
        tool_choice: Optional[
            Union[
                models.ChatCompletionRequestToolChoice,
                models.ChatCompletionRequestToolChoiceTypedDict,
            ]
        ] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        n: OptionalNullable[int] = UNSET,
        prediction: Optional[
            Union[models.Prediction, models.PredictionTypedDict]
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
        safe_prompt: Optional[bool] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ChatCompletionResponse:
        r"""Chat Completion

        Async variant of ``complete``: POSTs ``/v1/chat/completions`` and returns the
        full (non-streaming) completion response.

        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
        :param n: Number of completions to return for each request, input tokens are only billed once.
        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
        :param safe_prompt: Whether to inject a safety prompt before all conversations.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Per-call overrides: fall back to the SDK-wide timeout when none is given,
        # and let an explicit server_url win over the configured base URL.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Build the typed request body; get_pydantic_model coerces plain
        # dict/TypedDict inputs into their pydantic model equivalents.
        request = models.ChatCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            messages=utils.get_pydantic_model(
                messages, List[models.ChatCompletionRequestMessage]
            ),
            response_format=utils.get_pydantic_model(
                response_format, Optional[models.ResponseFormat]
            ),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.ChatCompletionRequestToolChoice]
            ),
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            n=n,
            prediction=utils.get_pydantic_model(
                prediction, Optional[models.Prediction]
            ),
            parallel_tool_calls=parallel_tool_calls,
            prompt_mode=prompt_mode,
            safe_prompt=safe_prompt,
        )

        # Assemble the HTTP request; the body is serialized lazily as JSON.
        req = self._build_request_async(
            method="POST",
            path="/v1/chat/completions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Method-level retries override the SDK-wide retry config; retries
        # apply to 429 and the common transient 5xx statuses.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute; HookContext identifies this operation to the SDK hook machinery.
        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_completion_v1_chat_completions_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed response; 422 -> validation error with parsed payload;
        # any other 4XX/5XX -> SDKError carrying the raw response text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ChatCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        # Defensive fallback for content types not covered above.
        raise errors.SDKError("Unexpected response received", http_res)
    def stream(
        self,
        *,
        model: str,
        messages: Union[
            List[models.ChatCompletionStreamRequestMessage],
            List[models.ChatCompletionStreamRequestMessageTypedDict],
        ],
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = None,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[
            Union[
                models.ChatCompletionStreamRequestStop,
                models.ChatCompletionStreamRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        response_format: Optional[
            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
        ] = None,
        tools: OptionalNullable[
            Union[List[models.Tool], List[models.ToolTypedDict]]
        ] = UNSET,
        tool_choice: Optional[
            Union[
                models.ChatCompletionStreamRequestToolChoice,
                models.ChatCompletionStreamRequestToolChoiceTypedDict,
            ]
        ] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        n: OptionalNullable[int] = UNSET,
        prediction: Optional[
            Union[models.Prediction, models.PredictionTypedDict]
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
        safe_prompt: Optional[bool] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.CompletionEvent]:
        r"""Stream chat completion

        Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream:
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
        :param n: Number of completions to return for each request, input tokens are only billed once.
        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
        :param safe_prompt: Whether to inject a safety prompt before all conversations.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Per-call overrides: fall back to the SDK-wide timeout when none is given,
        # and let an explicit server_url win over the configured base URL.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Build the typed request body; get_pydantic_model coerces plain
        # dict/TypedDict inputs into their pydantic model equivalents.
        request = models.ChatCompletionStreamRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            messages=utils.get_pydantic_model(
                messages, List[models.ChatCompletionStreamRequestMessage]
            ),
            response_format=utils.get_pydantic_model(
                response_format, Optional[models.ResponseFormat]
            ),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]
            ),
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            n=n,
            prediction=utils.get_pydantic_model(
                prediction, Optional[models.Prediction]
            ),
            parallel_tool_calls=parallel_tool_calls,
            prompt_mode=prompt_mode,
            safe_prompt=safe_prompt,
        )

        # Assemble the HTTP request; the "#stream" path fragment distinguishes
        # this operation, and the Accept header requests server-sent events.
        req = self._build_request(
            method="POST",
            path="/v1/chat/completions#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionStreamRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Method-level retries override the SDK-wide retry config; retries
        # apply to 429 and the common transient 5xx statuses.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute with stream=True so the response body is not consumed eagerly.
        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="stream_chat",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        # 200 -> wrap the raw SSE body in an EventStream that yields
        # CompletionEvent objects until the "[DONE]" sentinel is seen.
        # Error paths must drain the stream to text before parsing/raising.
        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                sentinel="[DONE]",
                client_ref=self,
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = utils.stream_to_text(http_res)
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res, http_res_text
            )
            raise errors.HTTPValidationError(response_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        # Defensive fallback for content types not covered above.
        http_res_text = utils.stream_to_text(http_res)
        raise errors.SDKError("Unexpected response received", http_res, http_res_text)
    async def stream_async(
        self,
        *,
        model: str,
        messages: Union[
            List[models.ChatCompletionStreamRequestMessage],
            List[models.ChatCompletionStreamRequestMessageTypedDict],
        ],
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = None,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[
            Union[
                models.ChatCompletionStreamRequestStop,
                models.ChatCompletionStreamRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        response_format: Optional[
            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
        ] = None,
        tools: OptionalNullable[
            Union[List[models.Tool], List[models.ToolTypedDict]]
        ] = UNSET,
        tool_choice: Optional[
            Union[
                models.ChatCompletionStreamRequestToolChoice,
                models.ChatCompletionStreamRequestToolChoiceTypedDict,
            ]
        ] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        n: OptionalNullable[int] = UNSET,
        prediction: Optional[
            Union[models.Prediction, models.PredictionTypedDict]
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
        safe_prompt: Optional[bool] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
        r"""Stream chat completion

        Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream:
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
        :param n: Number of completions to return for each request, input tokens are only billed once.
        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
        :param safe_prompt: Whether to inject a safety prompt before all conversations.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Per-call overrides: fall back to the SDK-wide timeout when none is given,
        # and let an explicit server_url win over the configured base URL.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Build the typed request body; get_pydantic_model coerces plain
        # dict/TypedDict inputs into their pydantic model equivalents.
        request = models.ChatCompletionStreamRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            messages=utils.get_pydantic_model(
                messages, List[models.ChatCompletionStreamRequestMessage]
            ),
            response_format=utils.get_pydantic_model(
                response_format, Optional[models.ResponseFormat]
            ),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]
            ),
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            n=n,
            prediction=utils.get_pydantic_model(
                prediction, Optional[models.Prediction]
            ),
            parallel_tool_calls=parallel_tool_calls,
            prompt_mode=prompt_mode,
            safe_prompt=safe_prompt,
        )

        # Assemble the HTTP request; the "#stream" path fragment distinguishes
        # this operation, and the Accept header requests server-sent events.
        req = self._build_request_async(
            method="POST",
            path="/v1/chat/completions#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionStreamRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Method-level retries override the SDK-wide retry config; retries
        # apply to 429 and the common transient 5xx statuses.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute with stream=True so the response body is not consumed eagerly.
        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="stream_chat",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        # 200 -> wrap the raw SSE body in an async EventStream that yields
        # CompletionEvent objects until the "[DONE]" sentinel is seen.
        # Error paths must drain the stream to text before parsing/raising.
        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStreamAsync(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                sentinel="[DONE]",
                client_ref=self,
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = await utils.stream_to_text_async(http_res)
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res, http_res_text
            )
            raise errors.HTTPValidationError(response_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        # Defensive fallback for content types not covered above.
        http_res_text = await utils.stream_to_text_async(http_res)
        raise errors.SDKError("Unexpected response received", http_res, http_res_text)
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py new file mode 100644 index 00000000..67199b60 --- /dev/null +++ b/src/mistralai/client/classifiers.py @@ -0,0 +1,800 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
class Classifiers(BaseSDK):
    r"""Classifiers API.

    Moderation and classification endpoints, each available over raw text
    inputs (``moderate`` / ``classify``) or whole chat conversations
    (``moderate_chat`` / ``classify_chat``), with sync and async variants.
    """

    def moderate(
        self,
        *,
        model: str,
        inputs: Union[
            models.ClassificationRequestInputs,
            models.ClassificationRequestInputsTypedDict,
        ],
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ModerationResponse:
        r"""Moderations

        :param model: ID of the model to use.
        :param inputs: Text to classify.
        :param metadata:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve effective timeout and base URL: per-call overrides win,
        # otherwise fall back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ClassificationRequest(
            model=model,
            metadata=metadata,
            inputs=inputs,
        )

        # Assemble the HTTP request; the body is lazily JSON-serialized from
        # the pydantic request model via get_serialized_body.
        req = self._build_request(
            method="POST",
            path="/v1/moderations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ClassificationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-level retry config; retries apply
        # to 429 and the listed transient 5XX statuses.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute through the hook pipeline; HookContext carries the operation
        # id and the security source used by request/response hooks.
        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="moderations_v1_moderations_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status: 200 -> parsed ModerationResponse,
        # 422 -> HTTPValidationError, other 4XX/5XX -> SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ModerationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def moderate_async(
        self,
        *,
        model: str,
        inputs: Union[
            models.ClassificationRequestInputs,
            models.ClassificationRequestInputsTypedDict,
        ],
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ModerationResponse:
        r"""Moderations

        :param model: ID of the model to use.
        :param inputs: Text to classify.
        :param metadata:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async variant of moderate(): identical flow via the async
        # build/do-request pipeline.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ClassificationRequest(
            model=model,
            metadata=metadata,
            inputs=inputs,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/moderations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ClassificationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Retry resolution mirrors the sync variant.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="moderations_v1_moderations_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ModerationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def moderate_chat(
        self,
        *,
        inputs: Union[
            models.ChatModerationRequestInputs3,
            models.ChatModerationRequestInputs3TypedDict,
        ],
        model: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ModerationResponse:
        r"""Chat Moderations

        :param inputs: Chat to classify
        :param model:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Same flow as moderate(), but posting a ChatModerationRequest to the
        # chat-specific endpoint.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ChatModerationRequest(
            inputs=utils.get_pydantic_model(
                inputs, models.ChatModerationRequestInputs3
            ),
            model=model,
        )

        req = self._build_request(
            method="POST",
            path="/v1/chat/moderations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatModerationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_moderations_v1_chat_moderations_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ModerationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def moderate_chat_async(
        self,
        *,
        inputs: Union[
            models.ChatModerationRequestInputs3,
            models.ChatModerationRequestInputs3TypedDict,
        ],
        model: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ModerationResponse:
        r"""Chat Moderations

        :param inputs: Chat to classify
        :param model:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async variant of moderate_chat(); identical flow using the async
        # request pipeline.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ChatModerationRequest(
            inputs=utils.get_pydantic_model(
                inputs, models.ChatModerationRequestInputs3
            ),
            model=model,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/chat/moderations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatModerationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_moderations_v1_chat_moderations_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ModerationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def classify(
        self,
        *,
        model: str,
        inputs: Union[
            models.ClassificationRequestInputs,
            models.ClassificationRequestInputsTypedDict,
        ],
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ClassificationResponse:
        r"""Classifications

        :param model: ID of the model to use.
        :param inputs: Text to classify.
        :param metadata:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Same request shape as moderate() (ClassificationRequest), but posted
        # to /v1/classifications and parsed into a ClassificationResponse.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ClassificationRequest(
            model=model,
            metadata=metadata,
            inputs=inputs,
        )

        req = self._build_request(
            method="POST",
            path="/v1/classifications",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ClassificationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="classifications_v1_classifications_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ClassificationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def classify_async(
        self,
        *,
        model: str,
        inputs: Union[
            models.ClassificationRequestInputs,
            models.ClassificationRequestInputsTypedDict,
        ],
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ClassificationResponse:
        r"""Classifications

        :param model: ID of the model to use.
        :param inputs: Text to classify.
        :param metadata:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async variant of classify(); identical flow using the async request
        # pipeline.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ClassificationRequest(
            model=model,
            metadata=metadata,
            inputs=inputs,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/classifications",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ClassificationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="classifications_v1_classifications_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ClassificationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def classify_chat(
        self,
        *,
        model: str,
        input: Union[models.Inputs, models.InputsTypedDict],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ClassificationResponse:
        r"""Chat Classifications

        :param model:
        :param input: Chat to classify
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # NOTE: unlike the text endpoints this operation takes a singular
        # `input` parameter (generated from the API schema) and shadows the
        # `input` builtin inside this method.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ChatClassificationRequest(
            model=model,
            input=utils.get_pydantic_model(input, models.Inputs),
        )

        req = self._build_request(
            method="POST",
            path="/v1/chat/classifications",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatClassificationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_classifications_v1_chat_classifications_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ClassificationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def classify_chat_async(
        self,
        *,
        model: str,
        input: Union[models.Inputs, models.InputsTypedDict],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ClassificationResponse:
        r"""Chat Classifications

        :param model:
        :param input: Chat to classify
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async variant of classify_chat(); identical flow using the async
        # request pipeline.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ChatClassificationRequest(
            model=model,
            input=utils.get_pydantic_model(input, models.Inputs),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/chat/classifications",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatClassificationRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_classifications_v1_chat_classifications_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ClassificationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
sdk-class-body + # Custom run code allowing client side execution of code + + @run_requirements + async def run_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RunResult: + """Run a conversation with the given inputs and context. + + The execution of a run will only stop when no required local execution can be done.""" + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel + + with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + with tracer.start_as_current_span(GenAISpanEnum.CONVERSATION.value): + while True: + if run_ctx.conversation_id is None: + res = await self.start_async( + inputs=input_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + run_result.conversation_id = res.conversation_id + run_ctx.conversation_id = res.conversation_id + 
logger.info( # pylint: disable=logging-fstring-interpolation + f"Started Run with conversation with id {res.conversation_id}" + ) + else: + res = await self.append_async( + conversation_id=run_ctx.conversation_id, + inputs=input_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + run_ctx.request_count += 1 + run_result.output_entries.extend(res.outputs) + fcalls = get_function_calls(res.outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) + return run_result + + @run_requirements + async def run_stream_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + """Similar to `run_async` but returns a generator which streams events. 
+ + The last streamed object is the RunResult object which summarises what happened in the run.""" + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + async def run_generator() -> ( + AsyncGenerator[Union[RunResultEvents, RunResult], None] + ): + current_entries = input_entries + while True: + received_event_tracker: defaultdict[ + int, list[ConversationEventsData] + ] = defaultdict(list) + if run_ctx.conversation_id is None: + res = await self.start_stream_async( + inputs=current_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + else: + res = await self.append_stream_async( + conversation_id=run_ctx.conversation_id, + inputs=current_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + async for event in res: + if ( + isinstance(event.data, ResponseStartedEvent) + and run_ctx.conversation_id is None + ): + run_result.conversation_id = event.data.conversation_id + run_ctx.conversation_id = event.data.conversation_id + logger.info( # pylint: disable=logging-fstring-interpolation + f"Started Run with conversation with id {run_ctx.conversation_id}" + ) + if ( + output_index := getattr(event.data, "output_index", None) + ) is not None: + received_event_tracker[output_index].append(event.data) + yield typing.cast(RunResultEvents, event) + run_ctx.request_count += 1 + outputs = reconstitue_entries(received_event_tracker) + run_result.output_entries.extend(outputs) + 
fcalls = get_function_calls(outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) + yield run_result + + return run_generator() + + # endregion sdk-class-body + + def start( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRequestAgentVersion, + models.ConversationRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. 
Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation.
get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def start_async( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + 
List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRequestAgentVersion, + models.ConversationRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + 
retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentsAPIV1ConversationsListResponse]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentsAPIV1ConversationsListResponse]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): 
+ retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def append( + self, + *, + conversation_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ + Union[ + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Response mapping: 200 -> model, 422 -> validation error, then generic 4XX/5XX.
        # The "422" branch must stay ahead of the "4XX" catch-all.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ConversationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def append_async(
        self,
        *,
        conversation_id: str,
        inputs: Optional[
            Union[models.ConversationInputs, models.ConversationInputsTypedDict]
        ] = None,
        stream: Optional[bool] = False,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationAppendRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        tool_confirmations: OptionalNullable[
            Union[
                List[models.ToolCallConfirmation],
                List[models.ToolCallConfirmationTypedDict],
            ]
        ] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Append new entries to an existing conversation.

        Run completion on the history of the conversation and the user entries. Return the new created entries.

        :param conversation_id: ID of the conversation to which we append entries.
        :param inputs:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param tool_confirmations:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of append(): identical request construction, awaited dispatch.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsAppendRequest(
            conversation_id=conversation_id,
            conversation_append_request=models.ConversationAppendRequest(
                inputs=utils.get_pydantic_model(
                    inputs, Optional[models.ConversationInputs]
                ),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
                tool_confirmations=utils.get_pydantic_model(
                    tool_confirmations,
                    OptionalNullable[List[models.ToolCallConfirmation]],
                ),
            ),
        )

        # NOTE(review): _build_request_async is not awaited here — presumably it
        # returns a request object synchronously; confirm against the base class.
        req = self._build_request_async(
            method="POST",
            path="/v1/conversations/{conversation_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_append_request,
                False,
                False,
                "json",
                models.ConversationAppendRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries
 == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_append",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Response mapping: "422" must be checked before the "4XX" catch-all.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ConversationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def get_history(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationHistory:
        r"""Retrieve all entries in a conversation.

        Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call.

        :param conversation_id: ID of the conversation from which we are fetching entries.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsHistoryRequest(
            conversation_id=conversation_id,
        )

        # GET request: no body, conversation_id is interpolated into the path.
        req = self._build_request(
            method="GET",
            path="/v1/conversations/{conversation_id}/history",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_history",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return
 unmarshal_json_response(models.ConversationHistory, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def get_history_async(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationHistory:
        r"""Retrieve all entries in a conversation.

        Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call.

        :param conversation_id: ID of the conversation from which we are fetching entries.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of get_history(): identical request construction, awaited dispatch.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsHistoryRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/conversations/{conversation_id}/history",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_history",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ConversationHistory, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error
 occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def get_messages(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationMessages:
        r"""Retrieve all messages in a conversation.

        Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only.

        :param conversation_id: ID of the conversation from which we are fetching messages.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Same shape as get_history(), but hits the /messages endpoint.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsMessagesRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/conversations/{conversation_id}/messages",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_messages",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Response mapping: "422" must be checked before the "4XX" catch-all.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ConversationMessages, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res,
 http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def get_messages_async(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationMessages:
        r"""Retrieve all messages in a conversation.

        Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only.

        :param conversation_id: ID of the conversation from which we are fetching messages.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of get_messages(): identical request construction, awaited dispatch.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsMessagesRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/conversations/{conversation_id}/messages",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_messages",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ConversationMessages, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API
 error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def restart(
        self,
        *,
        conversation_id: str,
        from_entry_id: str,
        inputs: Optional[
            Union[models.ConversationInputs, models.ConversationInputsTypedDict]
        ] = None,
        stream: Optional[bool] = False,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationRestartRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        agent_version: OptionalNullable[
            Union[
                models.ConversationRestartRequestAgentVersion,
                models.ConversationRestartRequestAgentVersionTypedDict,
            ]
        ] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Restart a conversation starting from a given entry.

        Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

        :param conversation_id: ID of the original conversation which is being restarted.
        :param from_entry_id:
        :param inputs:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param metadata: Custom metadata for the conversation.
        :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsRestartRequest(
            conversation_id=conversation_id,
            conversation_restart_request=models.ConversationRestartRequest(
                inputs=utils.get_pydantic_model(
                    inputs, Optional[models.ConversationInputs]
                ),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
                metadata=metadata,
                from_entry_id=from_entry_id,
                agent_version=agent_version,
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations/{conversation_id}/restart",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Body is serialized lazily via this lambda; it closes over `request`.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_restart_request,
                False,
                False,
                "json",
                models.ConversationRestartRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res =
 self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_restart",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Response mapping: "422" must be checked before the "4XX" catch-all.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ConversationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def restart_async(
        self,
        *,
        conversation_id: str,
        from_entry_id: str,
        inputs: Optional[
            Union[models.ConversationInputs, models.ConversationInputsTypedDict]
        ] = None,
        stream: Optional[bool] = False,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationRestartRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        agent_version: OptionalNullable[
            Union[
                models.ConversationRestartRequestAgentVersion,
                models.ConversationRestartRequestAgentVersionTypedDict,
            ]
        ] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Restart a conversation starting from a given entry.

        Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

        :param conversation_id: ID of the original conversation which is being restarted.
        :param from_entry_id:
        :param inputs:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param metadata: Custom metadata for the conversation.
        :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of restart(): identical request construction, awaited dispatch.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsRestartRequest(
            conversation_id=conversation_id,
            conversation_restart_request=models.ConversationRestartRequest(
                inputs=utils.get_pydantic_model(
                    inputs, Optional[models.ConversationInputs]
                ),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
                metadata=metadata,
                from_entry_id=from_entry_id,
                agent_version=agent_version,
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/conversations/{conversation_id}/restart",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_restart_request,
                False,
                False,
                "json",
                models.ConversationRestartRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_restart",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
 error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ConversationResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def start_stream(
        self,
        *,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = True,
        store: OptionalNullable[bool] = UNSET,
        handoff_execution: OptionalNullable[
            models.ConversationStreamRequestHandoffExecution
        ] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        tools: Optional[
            Union[
                List[models.ConversationStreamRequestTool],
                List[models.ConversationStreamRequestToolTypedDict],
            ]
        ] = None,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        agent_id: OptionalNullable[str] = UNSET,
        agent_version: OptionalNullable[
            Union[
                models.ConversationStreamRequestAgentVersion,
                models.ConversationStreamRequestAgentVersionTypedDict,
            ]
        ] = UNSET,
        model: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.ConversationEvents]:
        r"""Create a conversation and append entries to it.

        Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation.

        :param inputs:
        :param stream:
        :param store:
        :param handoff_execution:
        :param instructions:
        :param tools: List of tools which are available to the model during the conversation.
        :param completion_args:
        :param name:
        :param description:
        :param metadata:
        :param agent_id:
        :param agent_version:
        :param model:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Streaming variant: the request body is the stream request itself (no
        # path params), and the response is consumed as server-sent events.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ConversationStreamRequest(
            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
            stream=stream,
            store=store,
            handoff_execution=handoff_execution,
            instructions=instructions,
            tools=utils.get_pydantic_model(
                tools, Optional[List[models.ConversationStreamRequestTool]]
            ),
            completion_args=utils.get_pydantic_model(
                completion_args, OptionalNullable[models.CompletionArgs]
            ),
            name=name,
            description=description,
            metadata=metadata,
            agent_id=agent_id,
            agent_version=agent_version,
            model=model,
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ConversationStreamRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_start_stream",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            # Hand the open response to an EventStream that unmarshals each SSE
            # payload; client_ref keeps this client alive while the stream is read.
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
                client_ref=self,
            )
        if utils.match_response(http_res, "422", "application/json"):
            # Error paths drain the stream first so the raw body can be attached.
            http_res_text = utils.stream_to_text(http_res)
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res, http_res_text
            )
            raise errors.HTTPValidationError(response_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        http_res_text = utils.stream_to_text(http_res)
        raise errors.SDKError("Unexpected response received", http_res, http_res_text)

    async def start_stream_async(
        self,
        *,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = True,
        store: OptionalNullable[bool] = UNSET,
        handoff_execution: OptionalNullable[
            models.ConversationStreamRequestHandoffExecution
        ] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        tools: Optional[
            Union[
                List[models.ConversationStreamRequestTool],
                List[models.ConversationStreamRequestToolTypedDict],
            ]
        ] = None,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        agent_id: OptionalNullable[str] = UNSET,
        agent_version: OptionalNullable[
            Union[
                models.ConversationStreamRequestAgentVersion,
                models.ConversationStreamRequestAgentVersionTypedDict,
            ]
        ] =
UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationStreamRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def append_stream( + self, + *, + conversation_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ + Union[ + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Append new entries to an 
existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + 
get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def append_stream_async( + self, + *, + conversation_id: str, + inputs: Optional[ + 
Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ + Union[ + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + 
self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def restart_stream( + self, + *, + conversation_id: str, + from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = 
None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param from_entry_id: + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def restart_stream_async( + self, + *, + conversation_id: str, + from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> 
eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param from_entry_id: + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security 
+ ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py new file mode 100644 index 00000000..b3130364 --- /dev/null +++ b/src/mistralai/client/documents.py @@ -0,0 +1,1964 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bcc17286c31c + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Documents(BaseSDK): + r"""(beta) Libraries API - manage documents in a library.""" + + def list( + self, + *, + library_id: str, + search: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, + sort_by: Optional[str] = "created_at", + sort_order: Optional[str] = "desc", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDocumentsResponse: + r"""List documents in a given library. + + Given a library, lists the document that have been uploaded to that library. + + :param library_id: + :param search: + :param page_size: + :param page: + :param filters_attributes: + :param sort_by: + :param sort_order: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsListV1Request( + library_id=library_id, + search=search, + page_size=page_size, + page=page, + filters_attributes=filters_attributes, + sort_by=sort_by, + sort_order=sort_order, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListDocumentsResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + library_id: str, + search: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, + sort_by: Optional[str] = "created_at", + sort_order: Optional[str] = "desc", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDocumentsResponse: + r"""List documents in a given library. + + Given a library, lists the document that have been uploaded to that library. + + :param library_id: + :param search: + :param page_size: + :param page: + :param filters_attributes: + :param sort_by: + :param sort_order: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsListV1Request( + library_id=library_id, + search=search, + page_size=page_size, + page=page, + filters_attributes=filters_attributes, + sort_by=sort_by, + sort_order=sort_order, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListDocumentsResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, 
"4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def upload( + self, + *, + library_id: str, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Document: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, False, False, "multipart", models.DocumentUpload + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return unmarshal_json_response(models.Document, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise 
errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def upload_async( + self, + *, + library_id: str, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Document: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, False, False, "multipart", models.DocumentUpload + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return unmarshal_json_response(models.Document, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + 
raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Document: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Document, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) 
+ if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Document: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Document, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Document: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param attributes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + update_document_request=models.UpdateDocumentRequest( + name=name, + attributes=attributes, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_document_request, + False, + False, + "json", + models.UpdateDocumentRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Document, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + 
errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Document: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param attributes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + update_document_request=models.UpdateDocumentRequest( + name=name, + attributes=attributes, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_document_request, + False, + False, + "json", + models.UpdateDocumentRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Document, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def text_content( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. 
+ + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(models.DocumentTextContent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def text_content_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentTextContent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def status( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ProcessingStatusOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def status_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ProcessingStatusOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def extracted_text_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def extracted_text_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def reprocess( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", 
"*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def reprocess_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py new file mode 100644 index 00000000..5d55ffc4 --- /dev/null +++ b/src/mistralai/client/embeddings.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f9c17258207e + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Embeddings(BaseSDK): + r"""Embeddings API.""" + + def create( + self, + *, + model: str, + inputs: Union[ + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.EmbeddingResponse: + r"""Embeddings + + Embeddings + + :param model: ID of the model to use. + :param inputs: Text to embed. + :param metadata: + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. + :param output_dtype: + :param encoding_format: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise 
errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + inputs: Union[ + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.EmbeddingResponse: + r"""Embeddings + + Embeddings + + :param model: ID of the model to use. + :param inputs: Text to embed. + :param metadata: + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. + :param output_dtype: + :param encoding_format: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request_async( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise 
errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/errors/__init__.py b/src/mistralai/client/errors/__init__.py new file mode 100644 index 00000000..58a591a1 --- /dev/null +++ b/src/mistralai/client/errors/__init__.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0b2db51246df + +from .mistralerror import MistralError +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/errors/httpvalidationerror.py 
b/src/mistralai/client/errors/httpvalidationerror.py
new file mode 100644
index 00000000..97b16562
--- /dev/null
+++ b/src/mistralai/client/errors/httpvalidationerror.py
@@ -0,0 +1,29 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ac3de4a52bb6
+
+from __future__ import annotations
+from dataclasses import dataclass, field
+import httpx
+from mistralai.client.errors import MistralError
+from mistralai.client.models import validationerror as models_validationerror
+from mistralai.client.types import BaseModel
+from typing import List, Optional
+
+
+class HTTPValidationErrorData(BaseModel):  # parsed body of a 422 response
+    detail: Optional[List[models_validationerror.ValidationError]] = None
+
+
+@dataclass(unsafe_hash=True)
+class HTTPValidationError(MistralError):
+    data: HTTPValidationErrorData = field(hash=False)  # pydantic model, excluded from the hash
+
+    def __init__(
+        self,
+        data: HTTPValidationErrorData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        message = body or raw_response.text  # prefer the explicitly captured body text
+        super().__init__(message, raw_response, body)
+        object.__setattr__(self, "data", data)  # generator convention; mirrors MistralError's assignments
diff --git a/src/mistralai/client/errors/mistralerror.py b/src/mistralai/client/errors/mistralerror.py
new file mode 100644
index 00000000..eb73040c
--- /dev/null
+++ b/src/mistralai/client/errors/mistralerror.py
@@ -0,0 +1,31 @@
+"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT."""
+# @generated-id: d1f57f0ff1e9
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass, field
+
+
+@dataclass(unsafe_hash=True)
+class MistralError(Exception):
+    """The base class for all HTTP error responses."""
+
+    message: str
+    status_code: int
+    body: str
+    headers: httpx.Headers = field(hash=False)  # httpx containers are unhashable; keep out of hash
+    raw_response: httpx.Response = field(hash=False)
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        object.__setattr__(self, "message", message)
+        object.__setattr__(self, "status_code", raw_response.status_code)
+        object.__setattr__(
+            self, "body", body if body is not None else raw_response.text
+        )
+        object.__setattr__(self, "headers", raw_response.headers)
+        object.__setattr__(self, "raw_response", raw_response)
+
+    def __str__(self):
+        return self.message
diff --git a/src/mistralai/client/errors/no_response_error.py b/src/mistralai/client/errors/no_response_error.py
new file mode 100644
index 00000000..d71dfa7b
--- /dev/null
+++ b/src/mistralai/client/errors/no_response_error.py
@@ -0,0 +1,18 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8b469ecb0906
+
+from dataclasses import dataclass
+
+
+@dataclass(unsafe_hash=True)
+class NoResponseError(Exception):  # not a MistralError: there is no HTTP response to carry
+    """Error raised when no HTTP response is received from the server."""
+
+    message: str
+
+    def __init__(self, message: str = "No response received"):
+        object.__setattr__(self, "message", message)
+        super().__init__(message)
+
+    def __str__(self):
+        return self.message
diff --git a/src/mistralai/client/errors/responsevalidationerror.py b/src/mistralai/client/errors/responsevalidationerror.py
new file mode 100644
index 00000000..a7b3b9f0
--- /dev/null
+++ b/src/mistralai/client/errors/responsevalidationerror.py
@@ -0,0 +1,28 @@
+"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT."""
+# @generated-id: 6cfaa3147abe
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass
+
+from mistralai.client.errors import MistralError
+
+
+@dataclass(unsafe_hash=True)
+class ResponseValidationError(MistralError):
+    """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""
+
+    def __init__(
+        self,
+        message: str,
+        raw_response: httpx.Response,
+        cause: Exception,
+        body: Optional[str] = None,
+    ):
+        message = f"{message}: {cause}"  # fold the underlying validation error into the message
+        super().__init__(message, raw_response, body)
+
+    @property
+    def cause(self):
+        """Normally the Pydantic ValidationError"""
+        return self.__cause__  # NOTE(review): set only when raised via `raise ... from cause` — verify call sites
diff --git a/src/mistralai/client/errors/sdkerror.py b/src/mistralai/client/errors/sdkerror.py
new file mode 100644
index 00000000..25b87255
--- /dev/null
+++ b/src/mistralai/client/errors/sdkerror.py
@@ -0,0 +1,41 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c489ffe1e9ca
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass
+
+from mistralai.client.errors import MistralError
+
+MAX_MESSAGE_LEN = 10_000  # cap on response-body text echoed into the exception message
+
+
+@dataclass(unsafe_hash=True)
+class SDKError(MistralError):
+    """The fallback error class if no more specific error class is matched."""
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        body_display = body or raw_response.text or '""'
+
+        if message:
+            message += ": "
+        message += f"Status {raw_response.status_code}"
+
+        headers = raw_response.headers
+        content_type = headers.get("content-type", '""')
+        if content_type != "application/json":  # JSON is the default; only surface other types
+            if " " in content_type:
+                content_type = f'"{content_type}"'  # quote so parameters stay readable
+            message += f" Content-Type {content_type}"
+
+        if len(body_display) > MAX_MESSAGE_LEN:
+            truncated = body_display[:MAX_MESSAGE_LEN]
+            remaining = len(body_display) - MAX_MESSAGE_LEN
+            body_display = f"{truncated}...and 
{remaining} more chars" + + message += f". Body: {body_display}" + message = message.strip() + + super().__init__(message, raw_response, body) diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py new file mode 100644 index 00000000..a5f3adf6 --- /dev/null +++ b/src/mistralai/client/files.py @@ -0,0 +1,1107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f12df4b2ce43 + +from .basesdk import BaseSDK +import httpx +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Files(BaseSDK): + r"""Files API""" + + def upload( + self, + *, + file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.CreateFileResponse: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param purpose: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.MultiPartBodyParams( + purpose=purpose, + file=utils.get_pydantic_model(file, models.File), + ) + + req = self._build_request( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.MultiPartBodyParams + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_upload_file", + oauth2_scopes=None, + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.CreateFileResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def upload_async( + self, + *, + file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.CreateFileResponse: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param purpose: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.MultiPartBodyParams( + purpose=purpose, + file=utils.get_pydantic_model(file, models.File), + ) + + req = self._build_request_async( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.MultiPartBodyParams + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_upload_file", + oauth2_scopes=None, + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.CreateFileResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + include_total: Optional[bool] = True, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFilesResponse: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param page: + :param page_size: + :param include_total: + :param sample_type: + :param source: + :param search: + :param purpose: + :param mimetypes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + include_total=include_total, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + mimetypes=mimetypes, + ) + + req = self._build_request( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_list_files", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFilesResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, 
http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + include_total: Optional[bool] = True, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFilesResponse: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param page: + :param page_size: + :param include_total: + :param sample_type: + :param source: + :param search: + :param purpose: + :param mimetypes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + include_total=include_total, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + mimetypes=mimetypes, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_list_files", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFilesResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def retrieve( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.GetFileResponse: + r"""Retrieve File + + Returns information about a specific file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self._build_request( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_retrieve_file", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.GetFileResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def retrieve_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.GetFileResponse: + r"""Retrieve File + + Returns information about a specific file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_retrieve_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.GetFileResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + 
self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteFileResponse: + r"""Delete File + + Delete a file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_delete_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + 
retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteFileResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteFileResponse: + r"""Delete File + + Delete a file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_delete_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteFileResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def download( 
+ self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> httpx.Response: + r"""Download File + + Download a file + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self._build_request( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/octet-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_download_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + 
stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def download_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> httpx.Response: + r"""Download File + + Download a file + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/octet-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_download_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected 
response received", http_res, http_res_text) + + def get_signed_url( + self, + *, + file_id: str, + expiry: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.GetSignedURLResponse: + r"""Get Signed Url + + :param file_id: + :param expiry: Number of hours before the url becomes invalid. Defaults to 24h + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesGetSignedURLRequest( + file_id=file_id, + expiry=expiry, + ) + + req = self._build_request( + method="GET", + path="/v1/files/{file_id}/url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_get_signed_url", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.GetSignedURLResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_signed_url_async( + self, + *, + file_id: str, + expiry: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.GetSignedURLResponse: + r"""Get Signed Url + + :param file_id: + :param expiry: Number of hours before the url becomes invalid. Defaults to 24h + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesGetSignedURLRequest( + file_id=file_id, + expiry=expiry, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files/{file_id}/url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_get_signed_url", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.GetSignedURLResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", 
http_res) diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py new file mode 100644 index 00000000..8ffb7730 --- /dev/null +++ b/src/mistralai/client/fim.py @@ -0,0 +1,542 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 217bea5d701d + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + def complete( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FIMCompletionResponse: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FIMCompletionResponse: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if 
utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + 
client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/fine_tuning.py b/src/mistralai/client/fine_tuning.py new file mode 100644 index 00000000..df6bc564 --- /dev/null +++ b/src/mistralai/client/fine_tuning.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5d5079bbd54e + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.fine_tuning_jobs import FineTuningJobs +from typing import Optional + + +class FineTuning(BaseSDK): + jobs: FineTuningJobs + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = FineTuningJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/fine_tuning_jobs.py b/src/mistralai/client/fine_tuning_jobs.py new file mode 100644 index 00000000..c2ee871b --- /dev/null +++ b/src/mistralai/client/fine_tuning_jobs.py @@ -0,0 +1,1045 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fa1ea246e0b2 + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class FineTuningJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + 
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.ListFineTuningJobsResponse:
+        r"""Get Fine Tuning Jobs
+
+        Get a list of fine-tuning jobs for your organization and user.
+
+        :param page: The page number of the results to be returned.
+        :param page_size: The number of items to return per page.
+        :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed.
+        :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed.
+        :param created_before:
+        :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed.
+        :param status: The current job state to filter on. When set, the other results are not displayed.
+        :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed.
+        :param wandb_name: The Weights and Biases run name to filter on. When set, the other results are not displayed.
+        :param suffix: The model suffix to filter on. When set, the other results are not displayed.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_before=created_before, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, 
"5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFineTuningJobsResponse: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_before=created_before, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ) + + req = self._build_request_async( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if 
utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + model: str, + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], + training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], + ] + ] = UNSET, + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], + ] + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. 
+ + :param model: + :param hyperparameters: + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateFineTuningJobRequest( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] + ), + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateFineTuningJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], + training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], + ] + ] = UNSET, + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], + ] + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = 
None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. + + :param model: + :param hyperparameters: + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateFineTuningJobRequest( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateFineTuningJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response 
received", http_res) + + async def get_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected 
response received", http_res) + + async def cancel_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def start( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected 
response received", http_res) + + async def start_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT."""
# @generated-id: 3e46bde74327

# pyright: reportReturnType = false
import asyncio
from typing_extensions import Protocol, runtime_checkable
import httpx
from typing import Any, Optional, Union


@runtime_checkable
class HttpClient(Protocol):
    """Structural type for a synchronous HTTP client.

    Any object with `send`, `build_request` and `close` matching the
    `httpx.Client` signatures satisfies this protocol.
    """

    def send(
        self,
        request: httpx.Request,
        *,
        stream: bool = False,
        auth: Union[
            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
        ] = httpx.USE_CLIENT_DEFAULT,
        follow_redirects: Union[
            bool, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
    ) -> httpx.Response: ...

    def build_request(
        self,
        method: str,
        url: httpx._types.URLTypes,
        *,
        content: Optional[httpx._types.RequestContent] = None,
        data: Optional[httpx._types.RequestData] = None,
        files: Optional[httpx._types.RequestFiles] = None,
        json: Optional[Any] = None,
        params: Optional[httpx._types.QueryParamTypes] = None,
        headers: Optional[httpx._types.HeaderTypes] = None,
        cookies: Optional[httpx._types.CookieTypes] = None,
        timeout: Union[
            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
        extensions: Optional[httpx._types.RequestExtensions] = None,
    ) -> httpx.Request: ...

    def close(self) -> None: ...


@runtime_checkable
class AsyncHttpClient(Protocol):
    """Structural type for an asynchronous HTTP client.

    Any object with async `send`/`aclose` and a sync `build_request`
    matching the `httpx.AsyncClient` signatures satisfies this protocol.
    """

    async def send(
        self,
        request: httpx.Request,
        *,
        stream: bool = False,
        auth: Union[
            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
        ] = httpx.USE_CLIENT_DEFAULT,
        follow_redirects: Union[
            bool, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
    ) -> httpx.Response: ...

    def build_request(
        self,
        method: str,
        url: httpx._types.URLTypes,
        *,
        content: Optional[httpx._types.RequestContent] = None,
        data: Optional[httpx._types.RequestData] = None,
        files: Optional[httpx._types.RequestFiles] = None,
        json: Optional[Any] = None,
        params: Optional[httpx._types.QueryParamTypes] = None,
        headers: Optional[httpx._types.HeaderTypes] = None,
        cookies: Optional[httpx._types.CookieTypes] = None,
        timeout: Union[
            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
        extensions: Optional[httpx._types.RequestExtensions] = None,
    ) -> httpx.Request: ...

    async def aclose(self) -> None: ...
class ClientOwner(Protocol):
    """Structural type for SDK objects that hold the underlying HTTP clients."""

    client: Union[HttpClient, None]
    async_client: Union[AsyncHttpClient, None]


def close_clients(
    owner: ClientOwner,
    sync_client: Union[HttpClient, None],
    sync_client_supplied: bool,
    async_client: Union[AsyncHttpClient, None],
    async_client_supplied: bool,
) -> None:
    """
    Finalizer meant for use with weakref.finalize: closes the httpx clients
    used by an SDK so their underlying resources can be garbage collected.

    Clients supplied by the user are left untouched; only clients the SDK
    created itself are closed. All failures are swallowed because this runs
    during finalization, where raising would be worse than leaking.
    """
    # Drop the owner's references first so the client objects themselves
    # no longer reachable from the SDK instance and can be reaped.
    owner.client = None
    owner.async_client = None

    if sync_client is not None and not sync_client_supplied:
        try:
            sync_client.close()
        except Exception:
            pass  # best effort

    if async_client is None or async_client_supplied:
        return

    try:
        # If an event loop is running, hand the close coroutine off to it.
        loop = asyncio.get_running_loop()
        asyncio.run_coroutine_threadsafe(async_client.aclose(), loop)
    except RuntimeError:
        # No running loop: spin one up just to close the client.
        try:
            asyncio.run(async_client.aclose())
        except RuntimeError:
            # best effort
            pass
class Libraries(BaseSDK):
    r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities."""

    documents: Documents
    r"""(beta) Libraries API - manage documents in a library."""
    accesses: Accesses
    r"""(beta) Libraries API - manage access to a library."""

    def __init__(
        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
    ) -> None:
        # Share a single configuration between this section and its sub-SDKs.
        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
        self.sdk_configuration = sdk_config
        self._init_sdks()

    def _init_sdks(self):
        # Nested sub-SDKs reuse this instance's configuration and parent ref.
        cfg = self.sdk_configuration
        self.documents = Documents(cfg, parent_ref=self.parent_ref)
        self.accesses = Accesses(cfg, parent_ref=self.parent_ref)
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListLibrariesResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] 
= None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListLibrariesResponse: + r"""List all libraries you have access to. + + List all libraries that you have created or have been shared with you. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListLibrariesResponse, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Library: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateLibraryRequest( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateLibraryRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Library, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Library: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateLibraryRequest( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateLibraryRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Library, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Library: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Library, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Library: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Library, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Library: + r"""Delete a library and all of it's document. + + Given a library id, deletes it together with all documents that have been uploaded to that library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDeleteV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Library, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
        http_res_text = utils.stream_to_text(http_res)
        raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def delete_async(
        self,
        *,
        library_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Library:
        r"""Delete a library and all of its documents.

        Given a library id, deletes it together with all documents that have been uploaded to that library.

        :param library_id: ID of the library to delete.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide configured timeout.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit per-call server_url overrides the configured server URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.LibrariesDeleteV1Request(
            library_id=library_id,
        )

        req = self._build_request_async(
            method="DELETE",
            path="/v1/libraries/{library_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Method-level retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate-limit and transient server-error statuses.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="libraries_delete_v1",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.Library, http_res)
        # 422 carries a structured validation-error payload; other 4XX/5XX
        # responses are surfaced as plain-text SDK errors.
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    def update(
        self,
        *,
        library_id: str,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Library:
        r"""Update a library.

        Given a library id, you can update the name and description.

        :param library_id: ID of the library to update.
        :param name: New library name.
        :param description: New library description.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide configured timeout.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit per-call server_url overrides the configured server URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.LibrariesUpdateV1Request(
            library_id=library_id,
            update_library_request=models.UpdateLibraryRequest(
                name=name,
                description=description,
            ),
        )

        req = self._build_request(
            method="PUT",
            path="/v1/libraries/{library_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # JSON request body is supplied as a deferred callable.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.update_library_request,
                False,
                False,
                "json",
                models.UpdateLibraryRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Method-level retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate-limit and transient server-error statuses.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="libraries_update_v1",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.Library, http_res)
        # 422 carries a structured validation-error payload; other 4XX/5XX
        # responses are surfaced as plain-text SDK errors.
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)

    async def update_async(
        self,
        *,
        library_id: str,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Library:
        r"""Update a library.

        Given a library id, you can update the name and description.

        :param library_id: ID of the library to update.
        :param name: New library name.
        :param description: New library description.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide configured timeout.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit per-call server_url overrides the configured server URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.LibrariesUpdateV1Request(
            library_id=library_id,
            update_library_request=models.UpdateLibraryRequest(
                name=name,
                description=description,
            ),
        )

        req = self._build_request_async(
            method="PUT",
            path="/v1/libraries/{library_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # JSON request body is supplied as a deferred callable.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.update_library_request,
                False,
                False,
                "json",
                models.UpdateLibraryRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Method-level retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate-limit and transient server-error statuses.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="libraries_update_v1",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.Library, http_res)
        # 422 carries a structured validation-error payload.
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise
errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py new file mode 100644 index 00000000..7d2dfd97 --- /dev/null +++ b/src/mistralai/client/models/__init__.py @@ -0,0 +1,2372 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e0e8dad92725 + +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .agent import ( + Agent, + AgentTool, + AgentToolTypedDict, + AgentTypedDict, + UnknownAgentTool, + ) + from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict + from .agentconversation import ( + AgentConversation, + AgentConversationAgentVersion, + AgentConversationAgentVersionTypedDict, + AgentConversationTypedDict, + ) + from .agenthandoffdoneevent import ( + AgentHandoffDoneEvent, + AgentHandoffDoneEventTypedDict, + ) + from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict + from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, + ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + ) + from .agents_api_v1_agents_delete_aliasop import ( + AgentsAPIV1AgentsDeleteAliasRequest, + AgentsAPIV1AgentsDeleteAliasRequestTypedDict, + ) + from .agents_api_v1_agents_deleteop import ( + 
AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) + from .agents_api_v1_agents_get_versionop import ( + AgentsAPIV1AgentsGetVersionRequest, + AgentsAPIV1AgentsGetVersionRequestTypedDict, + ) + from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetAgentVersion, + AgentsAPIV1AgentsGetAgentVersionTypedDict, + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, + ) + from .agents_api_v1_agents_list_versionsop import ( + AgentsAPIV1AgentsListVersionsRequest, + AgentsAPIV1AgentsListVersionsRequestTypedDict, + ) + from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, + ) + from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, + ) + from .agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, + ) + from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, + ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) + from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + AgentsAPIV1ConversationsGetRequestTypedDict, + ResponseV1ConversationsGet, + ResponseV1ConversationsGetTypedDict, + ) + from .agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, + ) + from 
.agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + AgentsAPIV1ConversationsListResponse, + AgentsAPIV1ConversationsListResponseTypedDict, + ) + from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, + ) + from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, + ) + from .agentscompletionrequest import ( + AgentsCompletionRequest, + AgentsCompletionRequestMessage, + AgentsCompletionRequestMessageTypedDict, + AgentsCompletionRequestStop, + AgentsCompletionRequestStopTypedDict, + AgentsCompletionRequestToolChoice, + AgentsCompletionRequestToolChoiceTypedDict, + AgentsCompletionRequestTypedDict, + ) + from .agentscompletionstreamrequest import ( + AgentsCompletionStreamRequest, + AgentsCompletionStreamRequestMessage, + AgentsCompletionStreamRequestMessageTypedDict, + AgentsCompletionStreamRequestStop, + AgentsCompletionStreamRequestStopTypedDict, + AgentsCompletionStreamRequestToolChoice, + AgentsCompletionStreamRequestToolChoiceTypedDict, + AgentsCompletionStreamRequestTypedDict, + ) + from .apiendpoint import APIEndpoint + from .archivemodelresponse import ( + ArchiveModelResponse, + ArchiveModelResponseTypedDict, + ) + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageTypedDict, + ) + from .audiochunk import AudioChunk, AudioChunkTypedDict + from .audioencoding import AudioEncoding + from .audioformat import AudioFormat, AudioFormatTypedDict + from .audiotranscriptionrequest import ( + AudioTranscriptionRequest, + AudioTranscriptionRequestTypedDict, + ) 
+ from .audiotranscriptionrequeststream import ( + AudioTranscriptionRequestStream, + AudioTranscriptionRequestStreamTypedDict, + ) + from .basemodelcard import BaseModelCard, BaseModelCardTypedDict + from .batcherror import BatchError, BatchErrorTypedDict + from .batchjob import BatchJob, BatchJobTypedDict + from .batchjobstatus import BatchJobStatus + from .batchrequest import BatchRequest, BatchRequestTypedDict + from .builtinconnectors import BuiltInConnectors + from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .chatmoderationrequest import ( + ChatModerationRequest, + ChatModerationRequestInputs1, + ChatModerationRequestInputs1TypedDict, + ChatModerationRequestInputs2, + ChatModerationRequestInputs2TypedDict, + ChatModerationRequestInputs3, + ChatModerationRequestInputs3TypedDict, + ChatModerationRequestTypedDict, + ) + from .checkpoint import Checkpoint, CheckpointTypedDict + from .classificationrequest import ( + ClassificationRequest, + ClassificationRequestInputs, + 
ClassificationRequestInputsTypedDict, + ClassificationRequestTypedDict, + ) + from .classificationresponse import ( + ClassificationResponse, + ClassificationResponseTypedDict, + ) + from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, + ) + from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, + ) + from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobIntegration, + ClassifierFineTuningJobIntegrationTypedDict, + ClassifierFineTuningJobStatus, + ClassifierFineTuningJobTypedDict, + UnknownClassifierFineTuningJobIntegration, + ) + from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsIntegration, + ClassifierFineTuningJobDetailsIntegrationTypedDict, + ClassifierFineTuningJobDetailsStatus, + ClassifierFineTuningJobDetailsTypedDict, + UnknownClassifierFineTuningJobDetailsIntegration, + ) + from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict + from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, + ) + from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, + ) + from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict + from .completionargs import CompletionArgs, CompletionArgsTypedDict + from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, + ) + from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobIntegration, + CompletionFineTuningJobIntegrationTypedDict, + CompletionFineTuningJobRepository, + 
CompletionFineTuningJobRepositoryTypedDict, + CompletionFineTuningJobStatus, + CompletionFineTuningJobTypedDict, + UnknownCompletionFineTuningJobIntegration, + UnknownCompletionFineTuningJobRepository, + ) + from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsIntegration, + CompletionFineTuningJobDetailsIntegrationTypedDict, + CompletionFineTuningJobDetailsRepository, + CompletionFineTuningJobDetailsRepositoryTypedDict, + CompletionFineTuningJobDetailsStatus, + CompletionFineTuningJobDetailsTypedDict, + UnknownCompletionFineTuningJobDetailsIntegration, + UnknownCompletionFineTuningJobDetailsRepository, + ) + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk + from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestHandoffExecution, + ConversationAppendRequestTypedDict, + ) + from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestHandoffExecution, + ConversationAppendStreamRequestTypedDict, + ) + from .conversationevents import ( + ConversationEvents, + ConversationEventsData, + ConversationEventsDataTypedDict, + ConversationEventsTypedDict, + UnknownConversationEventsData, + ) + from .conversationhistory import ( + ConversationHistory, + ConversationHistoryTypedDict, + Entry, + EntryTypedDict, + ) + from .conversationinputs import ConversationInputs, ConversationInputsTypedDict + from .conversationmessages import ( + ConversationMessages, + ConversationMessagesTypedDict, + ) + from .conversationrequest import ( + ConversationRequest, + ConversationRequestAgentVersion, + 
ConversationRequestAgentVersionTypedDict, + ConversationRequestHandoffExecution, + ConversationRequestTool, + ConversationRequestToolTypedDict, + ConversationRequestTypedDict, + ) + from .conversationresponse import ( + ConversationResponse, + ConversationResponseTypedDict, + Output, + OutputTypedDict, + ) + from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestAgentVersion, + ConversationRestartRequestAgentVersionTypedDict, + ConversationRestartRequestHandoffExecution, + ConversationRestartRequestTypedDict, + ) + from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestAgentVersion, + ConversationRestartStreamRequestAgentVersionTypedDict, + ConversationRestartStreamRequestHandoffExecution, + ConversationRestartStreamRequestTypedDict, + ) + from .conversationstreamrequest import ( + ConversationStreamRequest, + ConversationStreamRequestAgentVersion, + ConversationStreamRequestAgentVersionTypedDict, + ConversationStreamRequestHandoffExecution, + ConversationStreamRequestTool, + ConversationStreamRequestToolTypedDict, + ConversationStreamRequestTypedDict, + ) + from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkThinking, + ConversationThinkChunkThinkingTypedDict, + ConversationThinkChunkTypedDict, + ) + from .conversationusageinfo import ( + ConversationUsageInfo, + ConversationUsageInfoTypedDict, + ) + from .createagentrequest import ( + CreateAgentRequest, + CreateAgentRequestTool, + CreateAgentRequestToolTypedDict, + CreateAgentRequestTypedDict, + ) + from .createbatchjobrequest import ( + CreateBatchJobRequest, + CreateBatchJobRequestTypedDict, + ) + from .createfileresponse import CreateFileResponse, CreateFileResponseTypedDict + from .createfinetuningjobrequest import ( + CreateFineTuningJobRequest, + CreateFineTuningJobRequestIntegration, + CreateFineTuningJobRequestIntegrationTypedDict, + 
CreateFineTuningJobRequestRepository, + CreateFineTuningJobRequestRepositoryTypedDict, + CreateFineTuningJobRequestTypedDict, + Hyperparameters, + HyperparametersTypedDict, + ) + from .createlibraryrequest import ( + CreateLibraryRequest, + CreateLibraryRequestTypedDict, + ) + from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, + ) + from .deletefileresponse import DeleteFileResponse, DeleteFileResponseTypedDict + from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict + from .deltamessage import ( + DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, + DeltaMessageTypedDict, + ) + from .document import Document, DocumentTypedDict + from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict + from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict + from .embeddingdtype import EmbeddingDtype + from .embeddingrequest import ( + EmbeddingRequest, + EmbeddingRequestInputs, + EmbeddingRequestInputsTypedDict, + EmbeddingRequestTypedDict, + ) + from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict + from .embeddingresponsedata import ( + EmbeddingResponseData, + EmbeddingResponseDataTypedDict, + ) + from .encodingformat import EncodingFormat + from .entitytype import EntityType + from .event import Event, EventTypedDict + from .file import File, FileTypedDict + from .filechunk import FileChunk, FileChunkTypedDict + from .filepurpose import FilePurpose + from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, + ) + from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, + ) + from .files_api_routes_get_signed_urlop import ( + 
FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, + ) + from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, + ) + from .files_api_routes_retrieve_fileop import ( + FilesAPIRoutesRetrieveFileRequest, + FilesAPIRoutesRetrieveFileRequestTypedDict, + ) + from .files_api_routes_upload_fileop import ( + MultiPartBodyParams, + MultiPartBodyParamsTypedDict, + ) + from .fileschema import FileSchema, FileSchemaTypedDict + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .finetuneablemodeltype import FineTuneableModelType + from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, + ) + from .ftclassifierlossfunction import FTClassifierLossFunction + from .ftmodelcard import FTModelCard, FTModelCardTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functioncallentry import ( + FunctionCallEntry, + FunctionCallEntryConfirmationStatus, + FunctionCallEntryTypedDict, + ) + from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, + ) + from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventConfirmationStatus, + FunctionCallEventTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict + from .functiontool import 
FunctionTool, FunctionToolTypedDict + from .getfileresponse import GetFileResponse, GetFileResponseTypedDict + from .getsignedurlresponse import ( + GetSignedURLResponse, + GetSignedURLResponseTypedDict, + ) + from .githubrepository import GithubRepository, GithubRepositoryTypedDict + from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict + from .imagedetail import ImageDetail + from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, + ) + from .inputentries import InputEntries, InputEntriesTypedDict + from .inputs import Inputs, InputsTypedDict + from .instructrequest import ( + InstructRequest, + InstructRequestMessage, + InstructRequestMessageTypedDict, + InstructRequestTypedDict, + ) + from .jobmetadata import JobMetadata, JobMetadataTypedDict + from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, + OrderBy, + ) + from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, + 
UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response, + ResponseTypedDict, + UnknownResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobsStatus, + ) + from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict + from .libraries_delete_v1op import ( 
+ LibrariesDeleteV1Request, + LibrariesDeleteV1RequestTypedDict, + ) + from .libraries_documents_delete_v1op import ( + LibrariesDocumentsDeleteV1Request, + LibrariesDocumentsDeleteV1RequestTypedDict, + ) + from .libraries_documents_get_extracted_text_signed_url_v1op import ( + LibrariesDocumentsGetExtractedTextSignedURLV1Request, + LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_signed_url_v1op import ( + LibrariesDocumentsGetSignedURLV1Request, + LibrariesDocumentsGetSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_status_v1op import ( + LibrariesDocumentsGetStatusV1Request, + LibrariesDocumentsGetStatusV1RequestTypedDict, + ) + from .libraries_documents_get_text_content_v1op import ( + LibrariesDocumentsGetTextContentV1Request, + LibrariesDocumentsGetTextContentV1RequestTypedDict, + ) + from .libraries_documents_get_v1op import ( + LibrariesDocumentsGetV1Request, + LibrariesDocumentsGetV1RequestTypedDict, + ) + from .libraries_documents_list_v1op import ( + LibrariesDocumentsListV1Request, + LibrariesDocumentsListV1RequestTypedDict, + ) + from .libraries_documents_reprocess_v1op import ( + LibrariesDocumentsReprocessV1Request, + LibrariesDocumentsReprocessV1RequestTypedDict, + ) + from .libraries_documents_update_v1op import ( + LibrariesDocumentsUpdateV1Request, + LibrariesDocumentsUpdateV1RequestTypedDict, + ) + from .libraries_documents_upload_v1op import ( + DocumentUpload, + DocumentUploadTypedDict, + LibrariesDocumentsUploadV1Request, + LibrariesDocumentsUploadV1RequestTypedDict, + ) + from .libraries_get_v1op import ( + LibrariesGetV1Request, + LibrariesGetV1RequestTypedDict, + ) + from .libraries_share_create_v1op import ( + LibrariesShareCreateV1Request, + LibrariesShareCreateV1RequestTypedDict, + ) + from .libraries_share_delete_v1op import ( + LibrariesShareDeleteV1Request, + LibrariesShareDeleteV1RequestTypedDict, + ) + from .libraries_share_list_v1op import ( + 
LibrariesShareListV1Request, + LibrariesShareListV1RequestTypedDict, + ) + from .libraries_update_v1op import ( + LibrariesUpdateV1Request, + LibrariesUpdateV1RequestTypedDict, + ) + from .library import Library, LibraryTypedDict + from .listbatchjobsresponse import ( + ListBatchJobsResponse, + ListBatchJobsResponseTypedDict, + ) + from .listdocumentsresponse import ( + ListDocumentsResponse, + ListDocumentsResponseTypedDict, + ) + from .listfilesresponse import ListFilesResponse, ListFilesResponseTypedDict + from .listfinetuningjobsresponse import ( + ListFineTuningJobsResponse, + ListFineTuningJobsResponseData, + ListFineTuningJobsResponseDataTypedDict, + ListFineTuningJobsResponseTypedDict, + UnknownListFineTuningJobsResponseData, + ) + from .listlibrariesresponse import ( + ListLibrariesResponse, + ListLibrariesResponseTypedDict, + ) + from .listsharingout import ListSharingOut, ListSharingOutTypedDict + from .messageentries import MessageEntries, MessageEntriesTypedDict + from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, + ) + from .messageinputentry import ( + MessageInputEntry, + MessageInputEntryContent, + MessageInputEntryContentTypedDict, + MessageInputEntryTypedDict, + Role, + ) + from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, + ) + from .messageoutputentry import ( + MessageOutputEntry, + MessageOutputEntryContent, + MessageOutputEntryContentTypedDict, + MessageOutputEntryTypedDict, + ) + from .messageoutputevent import ( + MessageOutputEvent, + MessageOutputEventContent, + MessageOutputEventContentTypedDict, + MessageOutputEventTypedDict, + ) + from .metric import Metric, MetricTypedDict + from .mistralpromptmode import MistralPromptMode + from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict + from .modelconversation import ( + ModelConversation, + ModelConversationTool, + ModelConversationToolTypedDict, + 
ModelConversationTypedDict, + UnknownModelConversationTool, + ) + from .modellist import ( + ModelList, + ModelListData, + ModelListDataTypedDict, + ModelListTypedDict, + UnknownModelListData, + ) + from .moderationobject import ModerationObject, ModerationObjectTypedDict + from .moderationresponse import ModerationResponse, ModerationResponseTypedDict + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import ( + DocumentUnion, + DocumentUnionTypedDict, + OCRRequest, + OCRRequestTypedDict, + TableFormat, + ) + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict + from .paginationinfo import PaginationInfo, PaginationInfoTypedDict + from .prediction import Prediction, PredictionTypedDict + from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .processstatus import ProcessStatus + from .realtimetranscriptionerror import ( + RealtimeTranscriptionError, + RealtimeTranscriptionErrorTypedDict, + ) + from .realtimetranscriptionerrordetail import ( + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailMessage, + RealtimeTranscriptionErrorDetailMessageTypedDict, + RealtimeTranscriptionErrorDetailTypedDict, + ) + from .realtimetranscriptioninputaudioappend import ( + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioAppendTypedDict, + ) + from .realtimetranscriptioninputaudioend import ( + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioEndTypedDict, + ) + from .realtimetranscriptioninputaudioflush import ( + RealtimeTranscriptionInputAudioFlush, + 
RealtimeTranscriptionInputAudioFlushTypedDict, + ) + from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, + ) + from .realtimetranscriptionsessioncreated import ( + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionCreatedTypedDict, + ) + from .realtimetranscriptionsessionupdated import ( + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdatedTypedDict, + ) + from .realtimetranscriptionsessionupdatemessage import ( + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdateMessageTypedDict, + ) + from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + RealtimeTranscriptionSessionUpdatePayloadTypedDict, + ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict + from .requestsource import RequestSource + from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict + from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsestartedevent import ( + ResponseStartedEvent, + ResponseStartedEventTypedDict, + ) + from .retrieve_model_v1_models_model_id_getop import ( + ResponseRetrieveModelV1ModelsModelIDGet, + ResponseRetrieveModelV1ModelsModelIDGetTypedDict, + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + UnknownResponseRetrieveModelV1ModelsModelIDGet, + ) + from .sampletype import SampleType + from .security import Security, SecurityTypedDict + from .shareenum import ShareEnum + from .sharingdelete import SharingDelete, SharingDeleteTypedDict + from .sharingin import SharingIn, SharingInTypedDict + from .sharingout import SharingOut, SharingOutTypedDict + from .source import Source + from .ssetypes import SSETypes + from .systemmessage import ( + SystemMessage, + 
SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ( + ThinkChunk, + ThinkChunkThinking, + ThinkChunkThinkingTypedDict, + ThinkChunkTypedDict, + ) + from .timestampgranularity import TimestampGranularity + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolcallconfirmation import ( + Confirmation, + ToolCallConfirmation, + ToolCallConfirmationTypedDict, + ) + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict + from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventName, + ToolExecutionDeltaEventNameTypedDict, + ToolExecutionDeltaEventTypedDict, + ) + from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventName, + ToolExecutionDoneEventNameTypedDict, + ToolExecutionDoneEventTypedDict, + ) + from .toolexecutionentry import ( + ToolExecutionEntry, + ToolExecutionEntryName, + ToolExecutionEntryNameTypedDict, + ToolExecutionEntryTypedDict, + ) + from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventName, + ToolExecutionStartedEventNameTypedDict, + ToolExecutionStartedEventTypedDict, + ) + from .toolfilechunk import ( + ToolFileChunk, + ToolFileChunkTool, + ToolFileChunkToolTypedDict, + ToolFileChunkTypedDict, + ) + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageTypedDict, + ) + from .toolreferencechunk import ( + ToolReferenceChunk, + ToolReferenceChunkTool, + ToolReferenceChunkToolTypedDict, + ToolReferenceChunkTypedDict, + ) + from .tooltypes import ToolTypes + from .trainingfile import 
TrainingFile, TrainingFileTypedDict + from .transcriptionresponse import ( + TranscriptionResponse, + TranscriptionResponseTypedDict, + ) + from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, + ) + from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneTypedDict, + ) + from .transcriptionstreamevents import ( + TranscriptionStreamEvents, + TranscriptionStreamEventsData, + TranscriptionStreamEventsDataTypedDict, + TranscriptionStreamEventsTypedDict, + UnknownTranscriptionStreamEventsData, + ) + from .transcriptionstreameventtypes import TranscriptionStreamEventTypes + from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageTypedDict, + ) + from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaTypedDict, + ) + from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaTypedDict, + ) + from .unarchivemodelresponse import ( + UnarchiveModelResponse, + UnarchiveModelResponseTypedDict, + ) + from .updateagentrequest import ( + UpdateAgentRequest, + UpdateAgentRequestTool, + UpdateAgentRequestToolTypedDict, + UpdateAgentRequestTypedDict, + ) + from .updatedocumentrequest import ( + Attributes, + AttributesTypedDict, + UpdateDocumentRequest, + UpdateDocumentRequestTypedDict, + ) + from .updatelibraryrequest import ( + UpdateLibraryRequest, + UpdateLibraryRequestTypedDict, + ) + from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict + from 
.wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, + ) + from .websearchpremiumtool import ( + WebSearchPremiumTool, + WebSearchPremiumToolTypedDict, + ) + from .websearchtool import WebSearchTool, WebSearchToolTypedDict + +__all__ = [ + "APIEndpoint", + "Agent", + "AgentAliasResponse", + "AgentAliasResponseTypedDict", + "AgentConversation", + "AgentConversationAgentVersion", + "AgentConversationAgentVersionTypedDict", + "AgentConversationTypedDict", + "AgentHandoffDoneEvent", + "AgentHandoffDoneEventTypedDict", + "AgentHandoffEntry", + "AgentHandoffEntryTypedDict", + "AgentHandoffStartedEvent", + "AgentHandoffStartedEventTypedDict", + "AgentTool", + "AgentToolTypedDict", + "AgentTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteAliasRequest", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", + "AgentsAPIV1AgentsDeleteRequestTypedDict", + "AgentsAPIV1AgentsGetAgentVersion", + "AgentsAPIV1AgentsGetAgentVersionTypedDict", + "AgentsAPIV1AgentsGetRequest", + "AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsGetVersionRequest", + "AgentsAPIV1AgentsGetVersionRequestTypedDict", + "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", + "AgentsAPIV1AgentsListVersionsRequest", + "AgentsAPIV1AgentsListVersionsRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + 
"AgentsAPIV1ConversationsDeleteRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + "AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsListResponse", + "AgentsAPIV1ConversationsListResponseTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", + "AgentsCompletionRequest", + "AgentsCompletionRequestMessage", + "AgentsCompletionRequestMessageTypedDict", + "AgentsCompletionRequestStop", + "AgentsCompletionRequestStopTypedDict", + "AgentsCompletionRequestToolChoice", + "AgentsCompletionRequestToolChoiceTypedDict", + "AgentsCompletionRequestTypedDict", + "AgentsCompletionStreamRequest", + "AgentsCompletionStreamRequestMessage", + "AgentsCompletionStreamRequestMessageTypedDict", + "AgentsCompletionStreamRequestStop", + "AgentsCompletionStreamRequestStopTypedDict", + "AgentsCompletionStreamRequestToolChoice", + "AgentsCompletionStreamRequestToolChoiceTypedDict", + "AgentsCompletionStreamRequestTypedDict", + "ArchiveModelResponse", + "ArchiveModelResponseTypedDict", + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageTypedDict", + "Attributes", + "AttributesTypedDict", + "AudioChunk", + "AudioChunkTypedDict", + "AudioEncoding", + "AudioFormat", + "AudioFormatTypedDict", + "AudioTranscriptionRequest", + "AudioTranscriptionRequestStream", + "AudioTranscriptionRequestStreamTypedDict", + "AudioTranscriptionRequestTypedDict", + "BaseModelCard", + "BaseModelCardTypedDict", + "BatchError", + "BatchErrorTypedDict", 
+ "BatchJob", + "BatchJobStatus", + "BatchJobTypedDict", + "BatchRequest", + "BatchRequestTypedDict", + "BuiltInConnectors", + "ChatClassificationRequest", + "ChatClassificationRequestTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "ChatModerationRequest", + "ChatModerationRequestInputs1", + "ChatModerationRequestInputs1TypedDict", + "ChatModerationRequestInputs2", + "ChatModerationRequestInputs2TypedDict", + "ChatModerationRequestInputs3", + "ChatModerationRequestInputs3TypedDict", + "ChatModerationRequestTypedDict", + "Checkpoint", + "CheckpointTypedDict", + "ClassificationRequest", + "ClassificationRequestInputs", + "ClassificationRequestInputsTypedDict", + "ClassificationRequestTypedDict", + "ClassificationResponse", + "ClassificationResponseTypedDict", + "ClassificationTargetResult", + "ClassificationTargetResultTypedDict", + "ClassifierFineTunedModel", + "ClassifierFineTunedModelTypedDict", + "ClassifierFineTuningJob", + "ClassifierFineTuningJobDetails", + "ClassifierFineTuningJobDetailsIntegration", + "ClassifierFineTuningJobDetailsIntegrationTypedDict", + "ClassifierFineTuningJobDetailsStatus", + "ClassifierFineTuningJobDetailsTypedDict", + "ClassifierFineTuningJobIntegration", + 
"ClassifierFineTuningJobIntegrationTypedDict", + "ClassifierFineTuningJobStatus", + "ClassifierFineTuningJobTypedDict", + "ClassifierTarget", + "ClassifierTargetResult", + "ClassifierTargetResultTypedDict", + "ClassifierTargetTypedDict", + "ClassifierTrainingParameters", + "ClassifierTrainingParametersTypedDict", + "CodeInterpreterTool", + "CodeInterpreterToolTypedDict", + "CompletionArgs", + "CompletionArgsStop", + "CompletionArgsStopTypedDict", + "CompletionArgsTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionFineTunedModel", + "CompletionFineTunedModelTypedDict", + "CompletionFineTuningJob", + "CompletionFineTuningJobDetails", + "CompletionFineTuningJobDetailsIntegration", + "CompletionFineTuningJobDetailsIntegrationTypedDict", + "CompletionFineTuningJobDetailsRepository", + "CompletionFineTuningJobDetailsRepositoryTypedDict", + "CompletionFineTuningJobDetailsStatus", + "CompletionFineTuningJobDetailsTypedDict", + "CompletionFineTuningJobIntegration", + "CompletionFineTuningJobIntegrationTypedDict", + "CompletionFineTuningJobRepository", + "CompletionFineTuningJobRepositoryTypedDict", + "CompletionFineTuningJobStatus", + "CompletionFineTuningJobTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "CompletionTrainingParameters", + "CompletionTrainingParametersTypedDict", + "Confirmation", + "ContentChunk", + "ContentChunkTypedDict", + "ConversationAppendRequest", + "ConversationAppendRequestHandoffExecution", + "ConversationAppendRequestTypedDict", + "ConversationAppendStreamRequest", + "ConversationAppendStreamRequestHandoffExecution", + "ConversationAppendStreamRequestTypedDict", + "ConversationEvents", + "ConversationEventsData", + "ConversationEventsDataTypedDict", + "ConversationEventsTypedDict", + "ConversationHistory", + "ConversationHistoryTypedDict", + "ConversationInputs", + 
"ConversationInputsTypedDict", + "ConversationMessages", + "ConversationMessagesTypedDict", + "ConversationRequest", + "ConversationRequestAgentVersion", + "ConversationRequestAgentVersionTypedDict", + "ConversationRequestHandoffExecution", + "ConversationRequestTool", + "ConversationRequestToolTypedDict", + "ConversationRequestTypedDict", + "ConversationResponse", + "ConversationResponseTypedDict", + "ConversationRestartRequest", + "ConversationRestartRequestAgentVersion", + "ConversationRestartRequestAgentVersionTypedDict", + "ConversationRestartRequestHandoffExecution", + "ConversationRestartRequestTypedDict", + "ConversationRestartStreamRequest", + "ConversationRestartStreamRequestAgentVersion", + "ConversationRestartStreamRequestAgentVersionTypedDict", + "ConversationRestartStreamRequestHandoffExecution", + "ConversationRestartStreamRequestTypedDict", + "ConversationStreamRequest", + "ConversationStreamRequestAgentVersion", + "ConversationStreamRequestAgentVersionTypedDict", + "ConversationStreamRequestHandoffExecution", + "ConversationStreamRequestTool", + "ConversationStreamRequestToolTypedDict", + "ConversationStreamRequestTypedDict", + "ConversationThinkChunk", + "ConversationThinkChunkThinking", + "ConversationThinkChunkThinkingTypedDict", + "ConversationThinkChunkTypedDict", + "ConversationUsageInfo", + "ConversationUsageInfoTypedDict", + "CreateAgentRequest", + "CreateAgentRequestTool", + "CreateAgentRequestToolTypedDict", + "CreateAgentRequestTypedDict", + "CreateBatchJobRequest", + "CreateBatchJobRequestTypedDict", + "CreateFileResponse", + "CreateFileResponseTypedDict", + "CreateFineTuningJobRequest", + "CreateFineTuningJobRequestIntegration", + "CreateFineTuningJobRequestIntegrationTypedDict", + "CreateFineTuningJobRequestRepository", + "CreateFineTuningJobRequestRepositoryTypedDict", + "CreateFineTuningJobRequestTypedDict", + "CreateLibraryRequest", + "CreateLibraryRequestTypedDict", + "DeleteFileResponse", + "DeleteFileResponseTypedDict", + 
"DeleteModelOut", + "DeleteModelOutTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", + "DeltaMessageTypedDict", + "Document", + "DocumentLibraryTool", + "DocumentLibraryToolTypedDict", + "DocumentTextContent", + "DocumentTextContentTypedDict", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkTypedDict", + "DocumentUnion", + "DocumentUnionTypedDict", + "DocumentUpload", + "DocumentUploadTypedDict", + "EmbeddingDtype", + "EmbeddingRequest", + "EmbeddingRequestInputs", + "EmbeddingRequestInputsTypedDict", + "EmbeddingRequestTypedDict", + "EmbeddingResponse", + "EmbeddingResponseData", + "EmbeddingResponseDataTypedDict", + "EmbeddingResponseTypedDict", + "EncodingFormat", + "EntityType", + "Entry", + "EntryTypedDict", + "Event", + "EventTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FTClassifierLossFunction", + "FTModelCard", + "FTModelCardTypedDict", + "File", + "FileChunk", + "FileChunkTypedDict", + "FilePurpose", + "FileSchema", + "FileSchemaTypedDict", + "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesGetSignedURLRequest", + "FilesAPIRoutesGetSignedURLRequestTypedDict", + "FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", + "FineTuneableModelType", + "FineTunedModelCapabilities", + "FineTunedModelCapabilitiesTypedDict", + "Format", + 
"Function", + "FunctionCall", + "FunctionCallEntry", + "FunctionCallEntryArguments", + "FunctionCallEntryArgumentsTypedDict", + "FunctionCallEntryConfirmationStatus", + "FunctionCallEntryTypedDict", + "FunctionCallEvent", + "FunctionCallEventConfirmationStatus", + "FunctionCallEventTypedDict", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionResultEntry", + "FunctionResultEntryTypedDict", + "FunctionTool", + "FunctionToolTypedDict", + "FunctionTypedDict", + "GetFileResponse", + "GetFileResponseTypedDict", + "GetSignedURLResponse", + "GetSignedURLResponseTypedDict", + "GithubRepository", + "GithubRepositoryIn", + "GithubRepositoryInTypedDict", + "GithubRepositoryTypedDict", + "Hyperparameters", + "HyperparametersTypedDict", + "ImageDetail", + "ImageGenerationTool", + "ImageGenerationToolTypedDict", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", + "InputEntries", + "InputEntriesTypedDict", + "Inputs", + "InputsTypedDict", + "InstructRequest", + "InstructRequestMessage", + "InstructRequestMessageTypedDict", + "InstructRequestTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "JobMetadata", + "JobMetadataTypedDict", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + 
"JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + "LegacyJobMetadata", + "LegacyJobMetadataTypedDict", + "LibrariesDeleteV1Request", + "LibrariesDeleteV1RequestTypedDict", + "LibrariesDocumentsDeleteV1Request", + "LibrariesDocumentsDeleteV1RequestTypedDict", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetSignedURLV1Request", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetStatusV1Request", + "LibrariesDocumentsGetStatusV1RequestTypedDict", + "LibrariesDocumentsGetTextContentV1Request", + "LibrariesDocumentsGetTextContentV1RequestTypedDict", + "LibrariesDocumentsGetV1Request", + "LibrariesDocumentsGetV1RequestTypedDict", + "LibrariesDocumentsListV1Request", + "LibrariesDocumentsListV1RequestTypedDict", + "LibrariesDocumentsReprocessV1Request", + "LibrariesDocumentsReprocessV1RequestTypedDict", + "LibrariesDocumentsUpdateV1Request", + 
"LibrariesDocumentsUpdateV1RequestTypedDict", + "LibrariesDocumentsUploadV1Request", + "LibrariesDocumentsUploadV1RequestTypedDict", + "LibrariesGetV1Request", + "LibrariesGetV1RequestTypedDict", + "LibrariesShareCreateV1Request", + "LibrariesShareCreateV1RequestTypedDict", + "LibrariesShareDeleteV1Request", + "LibrariesShareDeleteV1RequestTypedDict", + "LibrariesShareListV1Request", + "LibrariesShareListV1RequestTypedDict", + "LibrariesUpdateV1Request", + "LibrariesUpdateV1RequestTypedDict", + "Library", + "LibraryTypedDict", + "ListBatchJobsResponse", + "ListBatchJobsResponseTypedDict", + "ListDocumentsResponse", + "ListDocumentsResponseTypedDict", + "ListFilesResponse", + "ListFilesResponseTypedDict", + "ListFineTuningJobsResponse", + "ListFineTuningJobsResponseData", + "ListFineTuningJobsResponseDataTypedDict", + "ListFineTuningJobsResponseTypedDict", + "ListLibrariesResponse", + "ListLibrariesResponseTypedDict", + "ListSharingOut", + "ListSharingOutTypedDict", + "Loc", + "LocTypedDict", + "MessageEntries", + "MessageEntriesTypedDict", + "MessageInputContentChunks", + "MessageInputContentChunksTypedDict", + "MessageInputEntry", + "MessageInputEntryContent", + "MessageInputEntryContentTypedDict", + "MessageInputEntryTypedDict", + "MessageOutputContentChunks", + "MessageOutputContentChunksTypedDict", + "MessageOutputEntry", + "MessageOutputEntryContent", + "MessageOutputEntryContentTypedDict", + "MessageOutputEntryTypedDict", + "MessageOutputEvent", + "MessageOutputEventContent", + "MessageOutputEventContentTypedDict", + "MessageOutputEventTypedDict", + "Metric", + "MetricTypedDict", + "MistralPromptMode", + "ModelCapabilities", + "ModelCapabilitiesTypedDict", + "ModelConversation", + "ModelConversationTool", + "ModelConversationToolTypedDict", + "ModelConversationTypedDict", + "ModelList", + "ModelListData", + "ModelListDataTypedDict", + "ModelListTypedDict", + "ModerationObject", + "ModerationObjectTypedDict", + "ModerationResponse", + 
"ModerationResponseTypedDict", + "MultiPartBodyParams", + "MultiPartBodyParamsTypedDict", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", + "OrderBy", + "Output", + "OutputContentChunks", + "OutputContentChunksTypedDict", + "OutputTypedDict", + "PaginationInfo", + "PaginationInfoTypedDict", + "Prediction", + "PredictionTypedDict", + "ProcessStatus", + "ProcessingStatusOut", + "ProcessingStatusOutTypedDict", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailMessage", + "RealtimeTranscriptionErrorDetailMessageTypedDict", + "RealtimeTranscriptionErrorDetailTypedDict", + "RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionInputAudioAppend", + "RealtimeTranscriptionInputAudioAppendTypedDict", + "RealtimeTranscriptionInputAudioEnd", + "RealtimeTranscriptionInputAudioEndTypedDict", + "RealtimeTranscriptionInputAudioFlush", + "RealtimeTranscriptionInputAudioFlushTypedDict", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionCreatedTypedDict", + "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdateMessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict", + "RealtimeTranscriptionSessionUpdatePayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict", + "RealtimeTranscriptionSessionUpdated", + "RealtimeTranscriptionSessionUpdatedTypedDict", + "ReferenceChunk", + "ReferenceChunkTypedDict", + "RequestSource", + "Response", + "ResponseDoneEvent", + "ResponseDoneEventTypedDict", + "ResponseErrorEvent", + "ResponseErrorEventTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "ResponseRetrieveModelV1ModelsModelIDGet", 
+ "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", + "ResponseStartedEvent", + "ResponseStartedEventTypedDict", + "ResponseTypedDict", + "ResponseV1ConversationsGet", + "ResponseV1ConversationsGetTypedDict", + "RetrieveModelV1ModelsModelIDGetRequest", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "Role", + "SSETypes", + "SampleType", + "Security", + "SecurityTypedDict", + "ShareEnum", + "SharingDelete", + "SharingDeleteTypedDict", + "SharingIn", + "SharingInTypedDict", + "SharingOut", + "SharingOutTypedDict", + "Source", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TableFormat", + "TextChunk", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkThinking", + "ThinkChunkThinkingTypedDict", + "ThinkChunkTypedDict", + "TimestampGranularity", + "Tool", + "ToolCall", + "ToolCallConfirmation", + "ToolCallConfirmationTypedDict", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolConfiguration", + "ToolConfigurationTypedDict", + "ToolExecutionDeltaEvent", + "ToolExecutionDeltaEventName", + "ToolExecutionDeltaEventNameTypedDict", + "ToolExecutionDeltaEventTypedDict", + "ToolExecutionDoneEvent", + "ToolExecutionDoneEventName", + "ToolExecutionDoneEventNameTypedDict", + "ToolExecutionDoneEventTypedDict", + "ToolExecutionEntry", + "ToolExecutionEntryName", + "ToolExecutionEntryNameTypedDict", + "ToolExecutionEntryTypedDict", + "ToolExecutionStartedEvent", + "ToolExecutionStartedEventName", + "ToolExecutionStartedEventNameTypedDict", + "ToolExecutionStartedEventTypedDict", + "ToolFileChunk", + "ToolFileChunkTool", + "ToolFileChunkToolTypedDict", + "ToolFileChunkTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageTypedDict", + "ToolReferenceChunk", + "ToolReferenceChunkTool", + "ToolReferenceChunkToolTypedDict", + "ToolReferenceChunkTypedDict", 
+ "ToolTypedDict", + "ToolTypes", + "TrainingFile", + "TrainingFileTypedDict", + "TranscriptionResponse", + "TranscriptionResponseTypedDict", + "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkTypedDict", + "TranscriptionStreamDone", + "TranscriptionStreamDoneTypedDict", + "TranscriptionStreamEventTypes", + "TranscriptionStreamEvents", + "TranscriptionStreamEventsData", + "TranscriptionStreamEventsDataTypedDict", + "TranscriptionStreamEventsTypedDict", + "TranscriptionStreamLanguage", + "TranscriptionStreamLanguageTypedDict", + "TranscriptionStreamSegmentDelta", + "TranscriptionStreamSegmentDeltaTypedDict", + "TranscriptionStreamTextDelta", + "TranscriptionStreamTextDeltaTypedDict", + "UnarchiveModelResponse", + "UnarchiveModelResponseTypedDict", + "UnknownAgentTool", + "UnknownClassifierFineTuningJobDetailsIntegration", + "UnknownClassifierFineTuningJobIntegration", + "UnknownCompletionFineTuningJobDetailsIntegration", + "UnknownCompletionFineTuningJobDetailsRepository", + "UnknownCompletionFineTuningJobIntegration", + "UnknownCompletionFineTuningJobRepository", + "UnknownContentChunk", + "UnknownConversationEventsData", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "UnknownListFineTuningJobsResponseData", + "UnknownModelConversationTool", + "UnknownModelListData", + "UnknownResponse", + "UnknownResponseRetrieveModelV1ModelsModelIDGet", + "UnknownTranscriptionStreamEventsData", + "UpdateAgentRequest", + "UpdateAgentRequestTool", + "UpdateAgentRequestToolTypedDict", + "UpdateAgentRequestTypedDict", + "UpdateDocumentRequest", + "UpdateDocumentRequestTypedDict", + "UpdateLibraryRequest", + "UpdateLibraryRequestTypedDict", + "UpdateModelRequest", + "UpdateModelRequestTypedDict", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + 
"UserMessageContentTypedDict", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", + "WandbIntegration", + "WandbIntegrationResult", + "WandbIntegrationResultTypedDict", + "WandbIntegrationTypedDict", + "WebSearchPremiumTool", + "WebSearchPremiumToolTypedDict", + "WebSearchTool", + "WebSearchToolTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "Agent": ".agent", + "AgentTool": ".agent", + "AgentToolTypedDict": ".agent", + "AgentTypedDict": ".agent", + "UnknownAgentTool": ".agent", + "AgentAliasResponse": ".agentaliasresponse", + "AgentAliasResponseTypedDict": ".agentaliasresponse", + "AgentConversation": ".agentconversation", + "AgentConversationAgentVersion": ".agentconversation", + "AgentConversationAgentVersionTypedDict": ".agentconversation", + "AgentConversationTypedDict": ".agentconversation", + "AgentHandoffDoneEvent": ".agenthandoffdoneevent", + "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", + "AgentHandoffEntry": ".agenthandoffentry", + "AgentHandoffEntryTypedDict": ".agenthandoffentry", + "AgentHandoffStartedEvent": ".agenthandoffstartedevent", + "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequest": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetAgentVersion": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetAgentVersionTypedDict": 
".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", + "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", + "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", + 
"AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", + "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", + "AgentsCompletionRequest": ".agentscompletionrequest", + "AgentsCompletionRequestMessage": ".agentscompletionrequest", + "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestStop": ".agentscompletionrequest", + "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", + "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessage": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessageTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoice": 
".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", + "APIEndpoint": ".apiendpoint", + "ArchiveModelResponse": ".archivemodelresponse", + "ArchiveModelResponseTypedDict": ".archivemodelresponse", + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "AudioChunk": ".audiochunk", + "AudioChunkTypedDict": ".audiochunk", + "AudioEncoding": ".audioencoding", + "AudioFormat": ".audioformat", + "AudioFormatTypedDict": ".audioformat", + "AudioTranscriptionRequest": ".audiotranscriptionrequest", + "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", + "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", + "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", + "BaseModelCard": ".basemodelcard", + "BaseModelCardTypedDict": ".basemodelcard", + "BatchError": ".batcherror", + "BatchErrorTypedDict": ".batcherror", + "BatchJob": ".batchjob", + "BatchJobTypedDict": ".batchjob", + "BatchJobStatus": ".batchjobstatus", + "BatchRequest": ".batchrequest", + "BatchRequestTypedDict": ".batchrequest", + "BuiltInConnectors": ".builtinconnectors", + "ChatClassificationRequest": ".chatclassificationrequest", + "ChatClassificationRequestTypedDict": ".chatclassificationrequest", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": 
".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "ChatModerationRequest": ".chatmoderationrequest", + "ChatModerationRequestInputs1": ".chatmoderationrequest", + "ChatModerationRequestInputs1TypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs2": ".chatmoderationrequest", + "ChatModerationRequestInputs2TypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs3": ".chatmoderationrequest", + "ChatModerationRequestInputs3TypedDict": ".chatmoderationrequest", + "ChatModerationRequestTypedDict": ".chatmoderationrequest", + "Checkpoint": ".checkpoint", + "CheckpointTypedDict": ".checkpoint", + "ClassificationRequest": ".classificationrequest", + "ClassificationRequestInputs": ".classificationrequest", + "ClassificationRequestInputsTypedDict": ".classificationrequest", + "ClassificationRequestTypedDict": ".classificationrequest", + "ClassificationResponse": ".classificationresponse", + "ClassificationResponseTypedDict": ".classificationresponse", + "ClassificationTargetResult": ".classificationtargetresult", + "ClassificationTargetResultTypedDict": 
".classificationtargetresult", + "ClassifierFineTunedModel": ".classifierfinetunedmodel", + "ClassifierFineTunedModelTypedDict": ".classifierfinetunedmodel", + "ClassifierFineTuningJob": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegrationTypedDict": ".classifierfinetuningjob", + "ClassifierFineTuningJobStatus": ".classifierfinetuningjob", + "ClassifierFineTuningJobTypedDict": ".classifierfinetuningjob", + "UnknownClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobDetails": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsIntegrationTypedDict": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsStatus": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsTypedDict": ".classifierfinetuningjobdetails", + "UnknownClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + "ClassifierTarget": ".classifiertarget", + "ClassifierTargetTypedDict": ".classifiertarget", + "ClassifierTargetResult": ".classifiertargetresult", + "ClassifierTargetResultTypedDict": ".classifiertargetresult", + "ClassifierTrainingParameters": ".classifiertrainingparameters", + "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", + "CodeInterpreterTool": ".codeinterpretertool", + "CodeInterpreterToolTypedDict": ".codeinterpretertool", + "CompletionArgs": ".completionargs", + "CompletionArgsTypedDict": ".completionargs", + "CompletionArgsStop": ".completionargsstop", + "CompletionArgsStopTypedDict": ".completionargsstop", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionFineTunedModel": ".completionfinetunedmodel", + "CompletionFineTunedModelTypedDict": 
".completionfinetunedmodel", + "CompletionFineTuningJob": ".completionfinetuningjob", + "CompletionFineTuningJobIntegration": ".completionfinetuningjob", + "CompletionFineTuningJobIntegrationTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobRepositoryTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobStatus": ".completionfinetuningjob", + "CompletionFineTuningJobTypedDict": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobIntegration": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobDetails": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegrationTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepository": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepositoryTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsStatus": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsTypedDict": ".completionfinetuningjobdetails", + "UnknownCompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "UnknownCompletionFineTuningJobDetailsRepository": ".completionfinetuningjobdetails", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "CompletionTrainingParameters": ".completiontrainingparameters", + "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", + "ConversationAppendRequest": ".conversationappendrequest", + 
"ConversationAppendRequestHandoffExecution": ".conversationappendrequest", + "ConversationAppendRequestTypedDict": ".conversationappendrequest", + "ConversationAppendStreamRequest": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", + "ConversationEvents": ".conversationevents", + "ConversationEventsData": ".conversationevents", + "ConversationEventsDataTypedDict": ".conversationevents", + "ConversationEventsTypedDict": ".conversationevents", + "UnknownConversationEventsData": ".conversationevents", + "ConversationHistory": ".conversationhistory", + "ConversationHistoryTypedDict": ".conversationhistory", + "Entry": ".conversationhistory", + "EntryTypedDict": ".conversationhistory", + "ConversationInputs": ".conversationinputs", + "ConversationInputsTypedDict": ".conversationinputs", + "ConversationMessages": ".conversationmessages", + "ConversationMessagesTypedDict": ".conversationmessages", + "ConversationRequest": ".conversationrequest", + "ConversationRequestAgentVersion": ".conversationrequest", + "ConversationRequestAgentVersionTypedDict": ".conversationrequest", + "ConversationRequestHandoffExecution": ".conversationrequest", + "ConversationRequestTool": ".conversationrequest", + "ConversationRequestToolTypedDict": ".conversationrequest", + "ConversationRequestTypedDict": ".conversationrequest", + "ConversationResponse": ".conversationresponse", + "ConversationResponseTypedDict": ".conversationresponse", + "Output": ".conversationresponse", + "OutputTypedDict": ".conversationresponse", + "ConversationRestartRequest": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", + "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", + 
"ConversationRestartRequestTypedDict": ".conversationrestartrequest", + "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", + "ConversationStreamRequest": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", + "ConversationStreamRequestTool": ".conversationstreamrequest", + "ConversationStreamRequestToolTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestTypedDict": ".conversationstreamrequest", + "ConversationThinkChunk": ".conversationthinkchunk", + "ConversationThinkChunkThinking": ".conversationthinkchunk", + "ConversationThinkChunkThinkingTypedDict": ".conversationthinkchunk", + "ConversationThinkChunkTypedDict": ".conversationthinkchunk", + "ConversationUsageInfo": ".conversationusageinfo", + "ConversationUsageInfoTypedDict": ".conversationusageinfo", + "CreateAgentRequest": ".createagentrequest", + "CreateAgentRequestTool": ".createagentrequest", + "CreateAgentRequestToolTypedDict": ".createagentrequest", + "CreateAgentRequestTypedDict": ".createagentrequest", + "CreateBatchJobRequest": ".createbatchjobrequest", + "CreateBatchJobRequestTypedDict": ".createbatchjobrequest", + "CreateFileResponse": ".createfileresponse", + "CreateFileResponseTypedDict": ".createfileresponse", + "CreateFineTuningJobRequest": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegration": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegrationTypedDict": 
".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepository": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepositoryTypedDict": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestTypedDict": ".createfinetuningjobrequest", + "Hyperparameters": ".createfinetuningjobrequest", + "HyperparametersTypedDict": ".createfinetuningjobrequest", + "CreateLibraryRequest": ".createlibraryrequest", + "CreateLibraryRequestTypedDict": ".createlibraryrequest", + "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteFileResponse": ".deletefileresponse", + "DeleteFileResponseTypedDict": ".deletefileresponse", + "DeleteModelOut": ".deletemodelout", + "DeleteModelOutTypedDict": ".deletemodelout", + "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "Document": ".document", + "DocumentTypedDict": ".document", + "DocumentLibraryTool": ".documentlibrarytool", + "DocumentLibraryToolTypedDict": ".documentlibrarytool", + "DocumentTextContent": ".documenttextcontent", + "DocumentTextContentTypedDict": ".documenttextcontent", + "DocumentURLChunk": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "EmbeddingDtype": ".embeddingdtype", + "EmbeddingRequest": ".embeddingrequest", + "EmbeddingRequestInputs": ".embeddingrequest", + "EmbeddingRequestInputsTypedDict": ".embeddingrequest", + "EmbeddingRequestTypedDict": ".embeddingrequest", + "EmbeddingResponse": ".embeddingresponse", + "EmbeddingResponseTypedDict": ".embeddingresponse", + "EmbeddingResponseData": ".embeddingresponsedata", + "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", + "EncodingFormat": ".encodingformat", + "EntityType": ".entitytype", + "Event": ".event", + "EventTypedDict": ".event", + "File": ".file", 
+ "FileTypedDict": ".file", + "FileChunk": ".filechunk", + "FileChunkTypedDict": ".filechunk", + "FilePurpose": ".filepurpose", + "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", + "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", + "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", + "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", + "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "MultiPartBodyParams": ".files_api_routes_upload_fileop", + "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", + "FileSchema": ".fileschema", + "FileSchemaTypedDict": ".fileschema", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "FineTuneableModelType": ".finetuneablemodeltype", + "FineTunedModelCapabilities": ".finetunedmodelcapabilities", + "FineTunedModelCapabilitiesTypedDict": ".finetunedmodelcapabilities", + "FTClassifierLossFunction": ".ftclassifierlossfunction", + 
"FTModelCard": ".ftmodelcard", + "FTModelCardTypedDict": ".ftmodelcard", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionCallEntry": ".functioncallentry", + "FunctionCallEntryConfirmationStatus": ".functioncallentry", + "FunctionCallEntryTypedDict": ".functioncallentry", + "FunctionCallEntryArguments": ".functioncallentryarguments", + "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", + "FunctionCallEvent": ".functioncallevent", + "FunctionCallEventConfirmationStatus": ".functioncallevent", + "FunctionCallEventTypedDict": ".functioncallevent", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "FunctionResultEntry": ".functionresultentry", + "FunctionResultEntryTypedDict": ".functionresultentry", + "FunctionTool": ".functiontool", + "FunctionToolTypedDict": ".functiontool", + "GetFileResponse": ".getfileresponse", + "GetFileResponseTypedDict": ".getfileresponse", + "GetSignedURLResponse": ".getsignedurlresponse", + "GetSignedURLResponseTypedDict": ".getsignedurlresponse", + "GithubRepository": ".githubrepository", + "GithubRepositoryTypedDict": ".githubrepository", + "GithubRepositoryIn": ".githubrepositoryin", + "GithubRepositoryInTypedDict": ".githubrepositoryin", + "ImageDetail": ".imagedetail", + "ImageGenerationTool": ".imagegenerationtool", + "ImageGenerationToolTypedDict": ".imagegenerationtool", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", + "InputEntries": ".inputentries", + "InputEntriesTypedDict": ".inputentries", + "Inputs": ".inputs", + "InputsTypedDict": ".inputs", + "InstructRequest": ".instructrequest", + "InstructRequestMessage": 
".instructrequest", + "InstructRequestMessageTypedDict": ".instructrequest", + "InstructRequestTypedDict": ".instructrequest", + "JobMetadata": ".jobmetadata", + "JobMetadataTypedDict": ".jobmetadata", + "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", + "OrderBy": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "ResponseTypedDict": 
".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "UnknownResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + 
"JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "LegacyJobMetadata": ".legacyjobmetadata", + "LegacyJobMetadataTypedDict": ".legacyjobmetadata", + "LibrariesDeleteV1Request": ".libraries_delete_v1op", + "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", + "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", + "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", + "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", + "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", + "LibrariesDocumentsListV1RequestTypedDict": 
".libraries_documents_list_v1op", + "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", + "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", + "DocumentUpload": ".libraries_documents_upload_v1op", + "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", + "LibrariesGetV1Request": ".libraries_get_v1op", + "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", + "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", + "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", + "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", + "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", + "LibrariesShareListV1Request": ".libraries_share_list_v1op", + "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", + "LibrariesUpdateV1Request": ".libraries_update_v1op", + "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", + "Library": ".library", + "LibraryTypedDict": ".library", + "ListBatchJobsResponse": ".listbatchjobsresponse", + "ListBatchJobsResponseTypedDict": ".listbatchjobsresponse", + "ListDocumentsResponse": ".listdocumentsresponse", + "ListDocumentsResponseTypedDict": ".listdocumentsresponse", + "ListFilesResponse": ".listfilesresponse", + "ListFilesResponseTypedDict": ".listfilesresponse", + "ListFineTuningJobsResponse": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseDataTypedDict": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseTypedDict": ".listfinetuningjobsresponse", + 
"UnknownListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListLibrariesResponse": ".listlibrariesresponse", + "ListLibrariesResponseTypedDict": ".listlibrariesresponse", + "ListSharingOut": ".listsharingout", + "ListSharingOutTypedDict": ".listsharingout", + "MessageEntries": ".messageentries", + "MessageEntriesTypedDict": ".messageentries", + "MessageInputContentChunks": ".messageinputcontentchunks", + "MessageInputContentChunksTypedDict": ".messageinputcontentchunks", + "MessageInputEntry": ".messageinputentry", + "MessageInputEntryContent": ".messageinputentry", + "MessageInputEntryContentTypedDict": ".messageinputentry", + "MessageInputEntryTypedDict": ".messageinputentry", + "Role": ".messageinputentry", + "MessageOutputContentChunks": ".messageoutputcontentchunks", + "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", + "MessageOutputEntry": ".messageoutputentry", + "MessageOutputEntryContent": ".messageoutputentry", + "MessageOutputEntryContentTypedDict": ".messageoutputentry", + "MessageOutputEntryTypedDict": ".messageoutputentry", + "MessageOutputEvent": ".messageoutputevent", + "MessageOutputEventContent": ".messageoutputevent", + "MessageOutputEventContentTypedDict": ".messageoutputevent", + "MessageOutputEventTypedDict": ".messageoutputevent", + "Metric": ".metric", + "MetricTypedDict": ".metric", + "MistralPromptMode": ".mistralpromptmode", + "ModelCapabilities": ".modelcapabilities", + "ModelCapabilitiesTypedDict": ".modelcapabilities", + "ModelConversation": ".modelconversation", + "ModelConversationTool": ".modelconversation", + "ModelConversationToolTypedDict": ".modelconversation", + "ModelConversationTypedDict": ".modelconversation", + "UnknownModelConversationTool": ".modelconversation", + "ModelList": ".modellist", + "ModelListData": ".modellist", + "ModelListDataTypedDict": ".modellist", + "ModelListTypedDict": ".modellist", + "UnknownModelListData": ".modellist", + "ModerationObject": ".moderationobject", + 
"ModerationObjectTypedDict": ".moderationobject", + "ModerationResponse": ".moderationresponse", + "ModerationResponseTypedDict": ".moderationresponse", + "OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "DocumentUnion": ".ocrrequest", + "DocumentUnionTypedDict": ".ocrrequest", + "OCRRequest": ".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "OutputContentChunks": ".outputcontentchunks", + "OutputContentChunksTypedDict": ".outputcontentchunks", + "PaginationInfo": ".paginationinfo", + "PaginationInfoTypedDict": ".paginationinfo", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ProcessingStatusOut": ".processingstatusout", + "ProcessingStatusOutTypedDict": ".processingstatusout", + "ProcessStatus": ".processstatus", + "RealtimeTranscriptionError": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessage": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessageTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionInputAudioAppend": ".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioAppendTypedDict": ".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioEnd": 
".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioEndTypedDict": ".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioFlush": ".realtimetranscriptioninputaudioflush", + "RealtimeTranscriptionInputAudioFlushTypedDict": ".realtimetranscriptioninputaudioflush", + "RealtimeTranscriptionSession": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdateMessage": ".realtimetranscriptionsessionupdatemessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict": ".realtimetranscriptionsessionupdatemessage", + "RealtimeTranscriptionSessionUpdatePayload": ".realtimetranscriptionsessionupdatepayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict": ".realtimetranscriptionsessionupdatepayload", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "RequestSource": ".requestsource", + "ResponseDoneEvent": ".responsedoneevent", + "ResponseDoneEventTypedDict": ".responsedoneevent", + "ResponseErrorEvent": ".responseerrorevent", + "ResponseErrorEventTypedDict": ".responseerrorevent", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseStartedEvent": ".responsestartedevent", + "ResponseStartedEventTypedDict": ".responsestartedevent", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequest": 
".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "UnknownResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "SampleType": ".sampletype", + "Security": ".security", + "SecurityTypedDict": ".security", + "ShareEnum": ".shareenum", + "SharingDelete": ".sharingdelete", + "SharingDeleteTypedDict": ".sharingdelete", + "SharingIn": ".sharingin", + "SharingInTypedDict": ".sharingin", + "SharingOut": ".sharingout", + "SharingOutTypedDict": ".sharingout", + "Source": ".source", + "SSETypes": ".ssetypes", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkThinking": ".thinkchunk", + "ThinkChunkThinkingTypedDict": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "TimestampGranularity": ".timestampgranularity", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "Confirmation": ".toolcallconfirmation", + "ToolCallConfirmation": ".toolcallconfirmation", + "ToolCallConfirmationTypedDict": ".toolcallconfirmation", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolConfiguration": ".toolconfiguration", + "ToolConfigurationTypedDict": ".toolconfiguration", + "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDoneEvent": 
".toolexecutiondoneevent", + "ToolExecutionDoneEventName": ".toolexecutiondoneevent", + "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", + "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", + "ToolExecutionEntry": ".toolexecutionentry", + "ToolExecutionEntryName": ".toolexecutionentry", + "ToolExecutionEntryNameTypedDict": ".toolexecutionentry", + "ToolExecutionEntryTypedDict": ".toolexecutionentry", + "ToolExecutionStartedEvent": ".toolexecutionstartedevent", + "ToolExecutionStartedEventName": ".toolexecutionstartedevent", + "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", + "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", + "ToolFileChunk": ".toolfilechunk", + "ToolFileChunkTool": ".toolfilechunk", + "ToolFileChunkToolTypedDict": ".toolfilechunk", + "ToolFileChunkTypedDict": ".toolfilechunk", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolReferenceChunk": ".toolreferencechunk", + "ToolReferenceChunkTool": ".toolreferencechunk", + "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", + "ToolReferenceChunkTypedDict": ".toolreferencechunk", + "ToolTypes": ".tooltypes", + "TrainingFile": ".trainingfile", + "TrainingFileTypedDict": ".trainingfile", + "TranscriptionResponse": ".transcriptionresponse", + "TranscriptionResponseTypedDict": ".transcriptionresponse", + "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", + "TranscriptionStreamDone": ".transcriptionstreamdone", + "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", + "TranscriptionStreamEvents": ".transcriptionstreamevents", + "TranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventsTypedDict": 
".transcriptionstreamevents", + "UnknownTranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", + "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", + "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", + "UnarchiveModelResponse": ".unarchivemodelresponse", + "UnarchiveModelResponseTypedDict": ".unarchivemodelresponse", + "UpdateAgentRequest": ".updateagentrequest", + "UpdateAgentRequestTool": ".updateagentrequest", + "UpdateAgentRequestToolTypedDict": ".updateagentrequest", + "UpdateAgentRequestTypedDict": ".updateagentrequest", + "Attributes": ".updatedocumentrequest", + "AttributesTypedDict": ".updatedocumentrequest", + "UpdateDocumentRequest": ".updatedocumentrequest", + "UpdateDocumentRequestTypedDict": ".updatedocumentrequest", + "UpdateLibraryRequest": ".updatelibraryrequest", + "UpdateLibraryRequestTypedDict": ".updatelibraryrequest", + "UpdateModelRequest": ".updatemodelrequest", + "UpdateModelRequestTypedDict": ".updatemodelrequest", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", + "WandbIntegration": ".wandbintegration", + "WandbIntegrationTypedDict": ".wandbintegration", + "WandbIntegrationResult": ".wandbintegrationresult", + "WandbIntegrationResultTypedDict": ".wandbintegrationresult", + "WebSearchPremiumTool": 
def __getattr__(attr_name: str) -> Any:
    """Resolve a lazily-exported model name on first attribute access.

    Looks the name up through the shared lazy-import helper using this
    package's ``_dynamic_imports`` table, so a model module is only loaded
    when one of its names is actually requested.
    """
    return lazy_getattr(
        attr_name,
        package=__package__,
        dynamic_imports=_dynamic_imports,
    )


def __dir__():
    """Advertise the lazily-importable names in ``dir()`` output."""
    return lazy_dir(dynamic_imports=_dynamic_imports)
AgentToolTypedDict = TypeAliasType(
    "AgentToolTypedDict",
    Union[
        FunctionToolTypedDict,
        WebSearchToolTypedDict,
        WebSearchPremiumToolTypedDict,
        CodeInterpreterToolTypedDict,
        ImageGenerationToolTypedDict,
        DocumentLibraryToolTypedDict,
    ],
)


class UnknownAgentTool(BaseModel):
    r"""An AgentTool variant the SDK doesn't recognize. Preserves the raw payload."""

    type: Literal["UNKNOWN"] = "UNKNOWN"
    raw: Any
    is_unknown: Literal[True] = True

    model_config = ConfigDict(frozen=True)


# Maps the wire-level "type" discriminator to the concrete tool model.
_AGENT_TOOL_VARIANTS: dict[str, Any] = {
    "code_interpreter": CodeInterpreterTool,
    "document_library": DocumentLibraryTool,
    "function": FunctionTool,
    "image_generation": ImageGenerationTool,
    "web_search": WebSearchTool,
    "web_search_premium": WebSearchPremiumTool,
}


# Open union: payloads with an unrecognized "type" fall back to
# UnknownAgentTool instead of failing validation.
AgentTool = Annotated[
    Union[
        CodeInterpreterTool,
        DocumentLibraryTool,
        FunctionTool,
        ImageGenerationTool,
        WebSearchTool,
        WebSearchPremiumTool,
        UnknownAgentTool,
    ],
    BeforeValidator(
        partial(
            parse_open_union,
            disc_key="type",
            variants=_AGENT_TOOL_VARIANTS,
            unknown_cls=UnknownAgentTool,
            union_name="AgentTool",
        )
    ),
]


class AgentTypedDict(TypedDict):
    model: str
    name: str
    id: str
    version: int
    versions: List[int]
    created_at: datetime
    updated_at: datetime
    deployment_chat: bool
    source: str
    instructions: NotRequired[Nullable[str]]
    r"""Instruction prompt the model will follow during the conversation."""
    tools: NotRequired[List[AgentToolTypedDict]]
    r"""List of tools which are available to the model during the conversation."""
    completion_args: NotRequired[CompletionArgsTypedDict]
    r"""White-listed arguments from the completion API"""
    description: NotRequired[Nullable[str]]
    handoffs: NotRequired[Nullable[List[str]]]
    metadata: NotRequired[Nullable[Dict[str, Any]]]
    object: Literal["agent"]
    version_message: NotRequired[Nullable[str]]


class Agent(BaseModel):
    r"""An agent definition: model, tools, instructions and version metadata."""

    model: str

    name: str

    id: str

    version: int

    versions: List[int]

    created_at: datetime

    updated_at: datetime

    deployment_chat: bool

    source: str

    instructions: OptionalNullable[str] = UNSET
    r"""Instruction prompt the model will follow during the conversation."""

    tools: Optional[List[AgentTool]] = None
    r"""List of tools which are available to the model during the conversation."""

    completion_args: Optional[CompletionArgs] = None
    r"""White-listed arguments from the completion API"""

    description: OptionalNullable[str] = UNSET

    handoffs: OptionalNullable[List[str]] = UNSET

    metadata: OptionalNullable[Dict[str, Any]] = UNSET

    # Constant "object" discriminator, pinned via validate_const.
    object: Annotated[
        Annotated[Optional[Literal["agent"]], AfterValidator(validate_const("agent"))],
        pydantic.Field(alias="object"),
    ] = "agent"

    version_message: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Drop UNSET sentinels and omitted optionals from the payload.

        Nullable fields that the caller explicitly set are still emitted,
        even when their value is None.
        """
        # Fields that may be omitted from the serialized payload entirely.
        optional_fields = {
            "instructions",
            "tools",
            "completion_args",
            "description",
            "handoffs",
            "metadata",
            "object",
            "version_message",
        }
        # Fields where an explicitly-set None must still be emitted.
        nullable_fields = {
            "instructions",
            "description",
            "handoffs",
            "metadata",
            "version_message",
        }
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


try:
    Agent.model_rebuild()
except NameError:
    pass
class AgentAliasResponseTypedDict(TypedDict):
    alias: str
    version: int
    created_at: datetime
    updated_at: datetime


class AgentAliasResponse(BaseModel):
    r"""An alias pointing at a specific agent version."""

    alias: str

    version: int

    created_at: datetime

    updated_at: datetime


# --- src/mistralai/client/models/agentconversation.py ---

AgentConversationAgentVersionTypedDict = TypeAliasType(
    "AgentConversationAgentVersionTypedDict", Union[str, int]
)


AgentConversationAgentVersion = TypeAliasType(
    "AgentConversationAgentVersion", Union[str, int]
)


class AgentConversationTypedDict(TypedDict):
    id: str
    created_at: datetime
    updated_at: datetime
    agent_id: str
    name: NotRequired[Nullable[str]]
    r"""Name given to the conversation."""
    description: NotRequired[Nullable[str]]
    r"""Description of what the conversation is about."""
    metadata: NotRequired[Nullable[Dict[str, Any]]]
    r"""Custom metadata for the conversation."""
    object: Literal["conversation"]
    agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]]


class AgentConversation(BaseModel):
    r"""A conversation attached to an agent (optionally a specific version)."""

    id: str

    created_at: datetime

    updated_at: datetime

    agent_id: str

    name: OptionalNullable[str] = UNSET
    r"""Name given to the conversation."""

    description: OptionalNullable[str] = UNSET
    r"""Description of what the conversation is about."""

    metadata: OptionalNullable[Dict[str, Any]] = UNSET
    r"""Custom metadata for the conversation."""

    # Constant "object" discriminator, pinned via validate_const.
    object: Annotated[
        Annotated[
            Optional[Literal["conversation"]],
            AfterValidator(validate_const("conversation")),
        ],
        pydantic.Field(alias="object"),
    ] = "conversation"

    agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Drop UNSET sentinels and omitted optionals from the payload."""
        # Fields that may be omitted from the serialized payload entirely.
        optional_fields = {"name", "description", "metadata", "object", "agent_version"}
        # Fields where an explicitly-set None must still be emitted.
        nullable_fields = {"name", "description", "metadata", "agent_version"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


try:
    AgentConversation.model_rebuild()
except NameError:
    pass
class AgentHandoffDoneEventTypedDict(TypedDict):
    id: str
    next_agent_id: str
    next_agent_name: str
    type: Literal["agent.handoff.done"]
    created_at: NotRequired[datetime]
    output_index: NotRequired[int]


class AgentHandoffDoneEvent(BaseModel):
    r"""Stream event emitted when an agent handoff has completed."""

    id: str

    next_agent_id: str

    next_agent_name: str

    # Constant "type" discriminator, pinned via validate_const.
    type: Annotated[
        Annotated[
            Literal["agent.handoff.done"],
            AfterValidator(validate_const("agent.handoff.done")),
        ],
        pydantic.Field(alias="type"),
    ] = "agent.handoff.done"

    created_at: Optional[datetime] = None

    output_index: Optional[int] = 0

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Drop UNSET sentinels and omit optional fields that are None."""
        optional_fields = {"created_at", "output_index"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


try:
    AgentHandoffDoneEvent.model_rebuild()
except NameError:
    pass
class AgentHandoffEntryTypedDict(TypedDict):
    previous_agent_id: str
    previous_agent_name: str
    next_agent_id: str
    next_agent_name: str
    object: Literal["entry"]
    type: Literal["agent.handoff"]
    created_at: NotRequired[datetime]
    completed_at: NotRequired[Nullable[datetime]]
    id: NotRequired[str]


class AgentHandoffEntry(BaseModel):
    r"""Conversation entry recording a handoff from one agent to another."""

    previous_agent_id: str

    previous_agent_name: str

    next_agent_id: str

    next_agent_name: str

    # Constant discriminators, pinned via validate_const.
    object: Annotated[
        Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))],
        pydantic.Field(alias="object"),
    ] = "entry"

    type: Annotated[
        Annotated[
            Optional[Literal["agent.handoff"]],
            AfterValidator(validate_const("agent.handoff")),
        ],
        pydantic.Field(alias="type"),
    ] = "agent.handoff"

    created_at: Optional[datetime] = None

    completed_at: OptionalNullable[datetime] = UNSET

    id: Optional[str] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Drop UNSET sentinels and omitted optionals from the payload."""
        optional_fields = {"object", "type", "created_at", "completed_at", "id"}
        # completed_at: an explicitly-set None must still be emitted.
        nullable_fields = {"completed_at"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


try:
    AgentHandoffEntry.model_rebuild()
except NameError:
    pass


# --- src/mistralai/client/models/agenthandoffstartedevent.py ---

class AgentHandoffStartedEventTypedDict(TypedDict):
    id: str
    previous_agent_id: str
    previous_agent_name: str
    type: Literal["agent.handoff.started"]
    created_at: NotRequired[datetime]
    output_index: NotRequired[int]


class AgentHandoffStartedEvent(BaseModel):
    r"""Stream event emitted when an agent handoff begins."""

    id: str

    previous_agent_id: str

    previous_agent_name: str

    # Constant "type" discriminator, pinned via validate_const.
    type: Annotated[
        Annotated[
            Literal["agent.handoff.started"],
            AfterValidator(validate_const("agent.handoff.started")),
        ],
        pydantic.Field(alias="type"),
    ] = "agent.handoff.started"

    created_at: Optional[datetime] = None

    output_index: Optional[int] = 0

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Drop UNSET sentinels and omit optional fields that are None."""
        optional_fields = {"created_at", "output_index"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


try:
    AgentHandoffStartedEvent.model_rebuild()
except NameError:
    pass
class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict):
    agent_id: str
    alias: str
    version: int


class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel):
    r"""Parameters for creating or updating an alias of an agent version."""

    # Path parameter (simple style, not exploded).
    agent_id: Annotated[
        str,
        FieldMetadata(path=PathParamMetadata(style="simple", explode=False)),
    ]

    # Query parameters (form style, exploded).
    alias: Annotated[
        str,
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ]

    version: Annotated[
        int,
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ]


# --- src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py ---

class AgentsAPIV1AgentsDeleteAliasRequestTypedDict(TypedDict):
    agent_id: str
    alias: str


class AgentsAPIV1AgentsDeleteAliasRequest(BaseModel):
    r"""Parameters for deleting an alias of an agent."""

    # Path parameter (simple style, not exploded).
    agent_id: Annotated[
        str,
        FieldMetadata(path=PathParamMetadata(style="simple", explode=False)),
    ]

    # Query parameter (form style, exploded).
    alias: Annotated[
        str,
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ]


# --- src/mistralai/client/models/agents_api_v1_agents_deleteop.py ---

class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict):
    agent_id: str


class AgentsAPIV1AgentsDeleteRequest(BaseModel):
    r"""Path parameters for deleting an agent."""

    agent_id: Annotated[
        str,
        FieldMetadata(path=PathParamMetadata(style="simple", explode=False)),
    ]
class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict):
    agent_id: str
    version: str


class AgentsAPIV1AgentsGetVersionRequest(BaseModel):
    r"""Path parameters for fetching a specific agent version."""

    agent_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    version: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]


# --- src/mistralai/client/models/agents_api_v1_agents_getop.py ---

AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType(
    "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str]
)


AgentsAPIV1AgentsGetAgentVersion = TypeAliasType(
    "AgentsAPIV1AgentsGetAgentVersion", Union[int, str]
)


class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict):
    agent_id: str
    agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]]


class AgentsAPIV1AgentsGetRequest(BaseModel):
    r"""Parameters for fetching an agent, optionally at a given version."""

    agent_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    agent_version: Annotated[
        OptionalNullable[AgentsAPIV1AgentsGetAgentVersion],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Drop the agent_version query param when left UNSET."""
        optional_fields = {"agent_version"}
        # agent_version: an explicitly-set None must still be emitted.
        nullable_fields = {"agent_version"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict):
    agent_id: str


class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel):
    r"""Path parameters for listing the version aliases of an agent."""

    agent_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]


# --- src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py ---

class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict):
    agent_id: str
    page: NotRequired[int]
    r"""Page number (0-indexed)"""
    page_size: NotRequired[int]
    r"""Number of versions per page"""


class AgentsAPIV1AgentsListVersionsRequest(BaseModel):
    r"""Parameters for listing an agent's versions, with paging."""

    agent_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    page: Annotated[
        Optional[int],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = 0
    r"""Page number (0-indexed)"""

    page_size: Annotated[
        Optional[int],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = 20
    r"""Number of versions per page"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Drop UNSET sentinels and omit paging params that are None."""
        optional_fields = {"page", "page_size"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
explode=True)), + ] = 20 + r"""Number of agents per page""" + + deployment_chat: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sources: Annotated[ + OptionalNullable[List[RequestSource]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Filter by agent name""" + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Search agents by name or ID""" + + id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "search", + "id", + "metadata", + ] + ) + nullable_fields = set( + ["deployment_chat", "sources", "name", "search", "id", "metadata"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py new file mode 100644 index 00000000..5ab821ea --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py @@ -0,0 +1,22 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 63f61b8891bf + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): + agent_id: str + version: int + + +class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_updateop.py b/src/mistralai/client/models/agents_api_v1_agents_updateop.py new file mode 100644 index 00000000..69da5001 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_updateop.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bb55993c932d + +from __future__ import annotations +from .updateagentrequest import UpdateAgentRequest, UpdateAgentRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): + agent_id: str + update_agent_request: UpdateAgentRequestTypedDict + + +class AgentsAPIV1AgentsUpdateRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_agent_request: Annotated[ + UpdateAgentRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py new file mode 100644 index 00000000..d257dc78 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ec00e0905f15 + +from __future__ import annotations +from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_stream_request: ConversationAppendStreamRequestTypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_stream_request: Annotated[ + ConversationAppendStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_appendop.py b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py new file mode 100644 index 00000000..61fec083 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 39c6125e850c + +from __future__ import annotations +from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_request: ConversationAppendRequestTypedDict + + +class AgentsAPIV1ConversationsAppendRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_request: Annotated[ + ConversationAppendRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py new file mode 100644 index 00000000..499645a7 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0792e6abbdcb + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py new file mode 100644 index 00000000..504616ab --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c530f2fc64d0 + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsGetRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" + + +ResponseV1ConversationsGetTypedDict = TypeAliasType( + "ResponseV1ConversationsGetTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) +r"""Successful Response""" + + +ResponseV1ConversationsGet = TypeAliasType( + "ResponseV1ConversationsGet", Union[AgentConversation, ModelConversation] +) +r"""Successful Response""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_historyop.py b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py new file mode 100644 index 00000000..ef0a4eb0 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2f5ca33768aa + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching entries.""" + + +class AgentsAPIV1ConversationsHistoryRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching entries.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py new file mode 100644 index 00000000..8bf66aea --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 936e36181d36 + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentsAPIV1ConversationsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page", "page_size", "metadata"]) + nullable_fields = set(["metadata"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsListResponseTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) + + 
+AgentsAPIV1ConversationsListResponse = TypeAliasType( + "AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] +) diff --git a/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py new file mode 100644 index 00000000..19978a19 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b5141764a708 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching messages.""" + + +class AgentsAPIV1ConversationsMessagesRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching messages.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py new file mode 100644 index 00000000..63c74449 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c284a1711148 + +from __future__ import annotations +from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the original conversation which is being restarted.""" + conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the original conversation which is being restarted.""" + + conversation_restart_stream_request: Annotated[ + ConversationRestartStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restartop.py b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py new file mode 100644 index 00000000..3186d5df --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3ba234e5a8fc + +from __future__ import annotations +from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the original conversation which is being restarted.""" + conversation_restart_request: ConversationRestartRequestTypedDict + + +class AgentsAPIV1ConversationsRestartRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the original conversation which is being restarted.""" + + conversation_restart_request: Annotated[ + ConversationRestartRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py new file mode 100644 index 00000000..6955f6ac --- /dev/null +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -0,0 +1,191 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3960bc4c545f + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsCompletionRequestStopTypedDict = TypeAliasType( + "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestStop = TypeAliasType( + "AgentsCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestMessageTypedDict = TypeAliasType( + "AgentsCompletionRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +AgentsCompletionRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) + + +AgentsCompletionRequestToolChoice = TypeAliasType( + "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) + + +class AgentsCompletionRequestTypedDict(TypedDict): + messages: List[AgentsCompletionRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[AgentsCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class AgentsCompletionRequest(BaseModel): + messages: List[AgentsCompletionRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + agent_id: str + r"""The ID of the agent to use for this completion.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[AgentsCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + + tool_choice: Optional[AgentsCompletionRequestToolChoice] = None + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + ["max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py new file mode 100644 index 00000000..c2cf3552 --- /dev/null +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -0,0 +1,189 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: 1b73f90befc2

from __future__ import annotations
from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from .mistralpromptmode import MistralPromptMode
from .prediction import Prediction, PredictionTypedDict
from .responseformat import ResponseFormat, ResponseFormatTypedDict
from .systemmessage import SystemMessage, SystemMessageTypedDict
from .tool import Tool, ToolTypedDict
from .toolchoice import ToolChoice, ToolChoiceTypedDict
from .toolchoiceenum import ToolChoiceEnum
from .toolmessage import ToolMessage, ToolMessageTypedDict
from .usermessage import UserMessage, UserMessageTypedDict
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai.client.utils import get_discriminator
from pydantic import Discriminator, Tag, model_serializer
from typing import Any, Dict, List, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


AgentsCompletionStreamRequestStopTypedDict = TypeAliasType(
    "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


AgentsCompletionStreamRequestStop = TypeAliasType(
    "AgentsCompletionStreamRequestStop", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


AgentsCompletionStreamRequestMessageTypedDict = TypeAliasType(
    "AgentsCompletionStreamRequestMessageTypedDict",
    Union[
        SystemMessageTypedDict,
        UserMessageTypedDict,
        AssistantMessageTypedDict,
        ToolMessageTypedDict,
    ],
)


# Discriminated union: pydantic picks the concrete message model from the
# message's "role" field via get_discriminator.
AgentsCompletionStreamRequestMessage = Annotated[
    Union[
        Annotated[AssistantMessage, Tag("assistant")],
        Annotated[SystemMessage, Tag("system")],
        Annotated[ToolMessage, Tag("tool")],
        Annotated[UserMessage, Tag("user")],
    ],
    Discriminator(lambda m: get_discriminator(m, "role", "role")),
]


AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
    "AgentsCompletionStreamRequestToolChoiceTypedDict",
    Union[ToolChoiceTypedDict, ToolChoiceEnum],
)


AgentsCompletionStreamRequestToolChoice = TypeAliasType(
    "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
)


class AgentsCompletionStreamRequestTypedDict(TypedDict):
    messages: List[AgentsCompletionStreamRequestMessageTypedDict]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
    agent_id: str
    r"""The ID of the agent to use for this completion."""
    max_tokens: NotRequired[Nullable[int]]
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
    stream: NotRequired[bool]
    stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict]
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
    random_seed: NotRequired[Nullable[int]]
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
    metadata: NotRequired[Nullable[Dict[str, Any]]]
    response_format: NotRequired[ResponseFormatTypedDict]
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
    tools: NotRequired[Nullable[List[ToolTypedDict]]]
    tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict]
    presence_penalty: NotRequired[float]
    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
    frequency_penalty: NotRequired[float]
    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
    n: NotRequired[Nullable[int]]
    r"""Number of completions to return for each request, input tokens are only billed once."""
    prediction: NotRequired[PredictionTypedDict]
    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
    parallel_tool_calls: NotRequired[bool]
    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


class AgentsCompletionStreamRequest(BaseModel):
    messages: List[AgentsCompletionStreamRequestMessage]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""

    agent_id: str
    r"""The ID of the agent to use for this completion."""

    max_tokens: OptionalNullable[int] = UNSET
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""

    stream: Optional[bool] = True

    stop: Optional[AgentsCompletionStreamRequestStop] = None
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""

    random_seed: OptionalNullable[int] = UNSET
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

    metadata: OptionalNullable[Dict[str, Any]] = UNSET

    response_format: Optional[ResponseFormat] = None
    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

    tools: OptionalNullable[List[Tool]] = UNSET

    tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None

    presence_penalty: Optional[float] = None
    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""

    frequency_penalty: Optional[float] = None
    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""

    n: OptionalNullable[int] = UNSET
    r"""Number of completions to return for each request, input tokens are only billed once."""

    prediction: Optional[Prediction] = None
    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""

    parallel_tool_calls: Optional[bool] = None

    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping unset optional fields while preserving explicit nulls.

        Optional fields are omitted when their value is None/UNSET; nullable
        fields that the caller explicitly set (even to None) are kept so they
        serialize as JSON null.
        """
        # set literals instead of set([...]); loop vars renamed so they do not
        # shadow the model field `n`.
        optional_fields = {
            "max_tokens",
            "stream",
            "stop",
            "random_seed",
            "metadata",
            "response_format",
            "tools",
            "tool_choice",
            "presence_penalty",
            "frequency_penalty",
            "n",
            "prediction",
            "parallel_tool_calls",
            "prompt_mode",
        }
        nullable_fields = {"max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"}
        serialized = handler(self)
        m = {}

        for field_name, field_info in type(self).model_fields.items():
            k = field_info.alias or field_name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = k in nullable_fields and (
                self.__pydantic_fields_set__.intersection({field_name})  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
b/src/mistralai/client/models/apiendpoint.py new file mode 100644 index 00000000..a6665c10 --- /dev/null +++ b/src/mistralai/client/models/apiendpoint.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 00b34ce0a24d + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +APIEndpoint = Union[ + Literal[ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/fim/completions", + "/v1/moderations", + "/v1/chat/moderations", + "/v1/ocr", + "/v1/classifications", + "/v1/chat/classifications", + "/v1/conversations", + "/v1/audio/transcriptions", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/archivemodelresponse.py b/src/mistralai/client/models/archivemodelresponse.py new file mode 100644 index 00000000..f1116850 --- /dev/null +++ b/src/mistralai/client/models/archivemodelresponse.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2d22c644df64 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ArchiveModelResponseTypedDict(TypedDict): + id: str + object: Literal["model"] + archived: NotRequired[bool] + + +class ArchiveModelResponse(BaseModel): + id: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + archived: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "archived"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ArchiveModelResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py new file mode 100644 index 00000000..26a778c7 --- /dev/null +++ b/src/mistralai/client/models/assistantmessage.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: 2b49546e0742

from __future__ import annotations
from .contentchunk import ContentChunk, ContentChunkTypedDict
from .toolcall import ToolCall, ToolCallTypedDict
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai.client.utils import validate_const
import pydantic
from pydantic import model_serializer
from pydantic.functional_validators import AfterValidator
from typing import List, Literal, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


AssistantMessageContentTypedDict = TypeAliasType(
    "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
)


AssistantMessageContent = TypeAliasType(
    "AssistantMessageContent", Union[str, List[ContentChunk]]
)


class AssistantMessageTypedDict(TypedDict):
    role: Literal["assistant"]
    content: NotRequired[Nullable[AssistantMessageContentTypedDict]]
    tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
    prefix: NotRequired[bool]
    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""


class AssistantMessage(BaseModel):
    # Constant role discriminator: always "assistant".
    role: Annotated[
        Annotated[
            Optional[Literal["assistant"]], AfterValidator(validate_const("assistant"))
        ],
        pydantic.Field(alias="role"),
    ] = "assistant"

    content: OptionalNullable[AssistantMessageContent] = UNSET

    tool_calls: OptionalNullable[List[ToolCall]] = UNSET

    prefix: Optional[bool] = False
    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping unset optional fields while preserving explicit nulls."""
        # Set literals instead of set([...]); loop vars renamed for clarity.
        optional_fields = {"role", "content", "tool_calls", "prefix"}
        nullable_fields = {"content", "tool_calls"}
        serialized = handler(self)
        m = {}

        for field_name, field_info in type(self).model_fields.items():
            k = field_info.alias or field_name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = k in nullable_fields and (
                self.__pydantic_fields_set__.intersection({field_name})  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


try:
    AssistantMessage.model_rebuild()
except NameError:
    pass
DO NOT EDIT.""" +# @generated-id: ce5dce4dced2 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class AudioChunkTypedDict(TypedDict): + input_audio: str + type: Literal["input_audio"] + + +class AudioChunk(BaseModel): + input_audio: str + + type: Annotated[ + Annotated[ + Literal["input_audio"], AfterValidator(validate_const("input_audio")) + ], + pydantic.Field(alias="type"), + ] = "input_audio" + + +try: + AudioChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audioencoding.py b/src/mistralai/client/models/audioencoding.py new file mode 100644 index 00000000..67fec75d --- /dev/null +++ b/src/mistralai/client/models/audioencoding.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b14e6a50f730 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +AudioEncoding = Union[ + Literal[ + "pcm_s16le", + "pcm_s32le", + "pcm_f16le", + "pcm_f32le", + "pcm_mulaw", + "pcm_alaw", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/audioformat.py b/src/mistralai/client/models/audioformat.py new file mode 100644 index 00000000..fef87ae7 --- /dev/null +++ b/src/mistralai/client/models/audioformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: c8655712c218

from __future__ import annotations
from .audioencoding import AudioEncoding
from mistralai.client.types import BaseModel
from typing_extensions import TypedDict


class AudioFormatTypedDict(TypedDict):
    encoding: AudioEncoding
    sample_rate: int


class AudioFormat(BaseModel):
    encoding: AudioEncoding

    sample_rate: int


# ---- src/mistralai/client/models/audiotranscriptionrequest.py ----
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: e4148b4d23e7

from .file import File, FileTypedDict
from .timestampgranularity import TimestampGranularity
from mistralai.client.types import (
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const
import pydantic
from pydantic import model_serializer
from pydantic.functional_validators import AfterValidator
from typing import List, Literal, Optional
from typing_extensions import Annotated, NotRequired


class AudioTranscriptionRequestTypedDict(TypedDict):
    model: str
    r"""ID of the model to be used."""
    file: NotRequired[FileTypedDict]
    file_url: NotRequired[Nullable[str]]
    r"""Url of a file to be transcribed"""
    file_id: NotRequired[Nullable[str]]
    r"""ID of a file uploaded to /v1/files"""
    language: NotRequired[Nullable[str]]
    r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy."""
    temperature: NotRequired[Nullable[float]]
    stream: Literal[False]
    diarize: NotRequired[bool]
    context_bias: NotRequired[List[str]]
    timestamp_granularities: NotRequired[List[TimestampGranularity]]
    r"""Granularities of timestamps to include in the response."""


class AudioTranscriptionRequest(BaseModel):
    model: Annotated[str, FieldMetadata(multipart=True)]
    r"""ID of the model to be used."""

    file: Annotated[
        Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True))
    ] = None

    file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""Url of a file to be transcribed"""

    file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""ID of a file uploaded to /v1/files"""

    language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy."""

    temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = (
        UNSET
    )

    # Non-streaming variant: stream is pinned to False.
    stream: Annotated[
        Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))],
        pydantic.Field(alias="stream"),
        FieldMetadata(multipart=True),
    ] = False

    diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False

    context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None

    timestamp_granularities: Annotated[
        Optional[List[TimestampGranularity]], FieldMetadata(multipart=True)
    ] = None
    r"""Granularities of timestamps to include in the response."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping unset optional fields while preserving explicit nulls."""
        # Set literals instead of set([...]); loop vars renamed for clarity.
        optional_fields = {
            "file",
            "file_url",
            "file_id",
            "language",
            "temperature",
            "stream",
            "diarize",
            "context_bias",
            "timestamp_granularities",
        }
        nullable_fields = {"file_url", "file_id", "language", "temperature"}
        serialized = handler(self)
        m = {}

        for field_name, field_info in type(self).model_fields.items():
            k = field_info.alias or field_name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = k in nullable_fields and (
                self.__pydantic_fields_set__.intersection({field_name})  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


try:
    AudioTranscriptionRequest.model_rebuild()
except NameError:
    pass
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: 33a07317a3b3

from __future__ import annotations
from .file import File, FileTypedDict
from .timestampgranularity import TimestampGranularity
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const
import pydantic
from pydantic import model_serializer
from pydantic.functional_validators import AfterValidator
from typing import List, Literal, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class AudioTranscriptionRequestStreamTypedDict(TypedDict):
    model: str
    file: NotRequired[FileTypedDict]
    file_url: NotRequired[Nullable[str]]
    r"""Url of a file to be transcribed"""
    file_id: NotRequired[Nullable[str]]
    r"""ID of a file uploaded to /v1/files"""
    language: NotRequired[Nullable[str]]
    r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy."""
    temperature: NotRequired[Nullable[float]]
    stream: Literal[True]
    diarize: NotRequired[bool]
    context_bias: NotRequired[List[str]]
    timestamp_granularities: NotRequired[List[TimestampGranularity]]
    r"""Granularities of timestamps to include in the response."""


class AudioTranscriptionRequestStream(BaseModel):
    model: Annotated[str, FieldMetadata(multipart=True)]

    file: Annotated[
        Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True))
    ] = None

    file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""Url of a file to be transcribed"""

    file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""ID of a file uploaded to /v1/files"""

    language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy."""

    temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = (
        UNSET
    )

    # Streaming variant: stream is pinned to True.
    stream: Annotated[
        Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))],
        pydantic.Field(alias="stream"),
        FieldMetadata(multipart=True),
    ] = True

    diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False

    context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None

    timestamp_granularities: Annotated[
        Optional[List[TimestampGranularity]], FieldMetadata(multipart=True)
    ] = None
    r"""Granularities of timestamps to include in the response."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping unset optional fields while preserving explicit nulls."""
        # Set literals instead of set([...]); loop vars renamed for clarity.
        optional_fields = {
            "file",
            "file_url",
            "file_id",
            "language",
            "temperature",
            "stream",
            "diarize",
            "context_bias",
            "timestamp_granularities",
        }
        nullable_fields = {"file_url", "file_id", "language", "temperature"}
        serialized = handler(self)
        m = {}

        for field_name, field_info in type(self).model_fields.items():
            k = field_info.alias or field_name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = k in nullable_fields and (
                self.__pydantic_fields_set__.intersection({field_name})  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


try:
    AudioTranscriptionRequestStream.model_rebuild()
except NameError:
    pass
Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + type: Annotated[ + Annotated[Literal["base"], AfterValidator(validate_const("base"))], + pydantic.Field(alias="type"), + ] = "base" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + BaseModelCard.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py new file mode 100644 index 00000000..8a353cd2 --- /dev/null +++ b/src/mistralai/client/models/batcherror.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1563e2a576ec + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchErrorTypedDict(TypedDict): + message: str + count: NotRequired[int] + + +class BatchError(BaseModel): + message: str + + count: Optional[int] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["count"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjob.py b/src/mistralai/client/models/batchjob.py new file mode 100644 index 00000000..80acac33 --- /dev/null +++ b/src/mistralai/client/models/batchjob.py @@ -0,0 +1,138 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 85cd28932cc7 + +from __future__ import annotations +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobstatus import BatchJobStatus +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class BatchJobTypedDict(TypedDict): + id: str + input_files: List[str] + endpoint: str + errors: List[BatchErrorTypedDict] + status: BatchJobStatus + created_at: int + total_requests: int + completed_requests: int + succeeded_requests: int + failed_requests: int + object: Literal["batch"] + metadata: NotRequired[Nullable[Dict[str, Any]]] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + output_file: NotRequired[Nullable[str]] + error_file: NotRequired[Nullable[str]] + outputs: NotRequired[Nullable[List[Dict[str, Any]]]] + started_at: NotRequired[Nullable[int]] + completed_at: NotRequired[Nullable[int]] + + +class BatchJob(BaseModel): + id: str + + input_files: List[str] + + endpoint: str + + errors: List[BatchError] + + status: BatchJobStatus + + created_at: int + + total_requests: int + + completed_requests: int + + succeeded_requests: int + + failed_requests: int + + object: Annotated[ + Annotated[Optional[Literal["batch"]], AfterValidator(validate_const("batch"))], + pydantic.Field(alias="object"), + ] = "batch" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + output_file: OptionalNullable[str] = UNSET + + error_file: OptionalNullable[str] = UNSET + + outputs: OptionalNullable[List[Dict[str, Any]]] = UNSET + + started_at: OptionalNullable[int] = UNSET + + 
completed_at: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) + nullable_fields = set( + [ + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + BatchJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py new file mode 100644 index 00000000..bd77faa2 --- /dev/null +++ b/src/mistralai/client/models/batchjobstatus.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 61e08cf5eea9 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +BatchJobStatus = Union[ + Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py new file mode 100644 index 00000000..911a9a05 --- /dev/null +++ b/src/mistralai/client/models/batchrequest.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6f36819eeb46 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class BatchRequestTypedDict(TypedDict): + body: Dict[str, Any] + custom_id: NotRequired[Nullable[str]] + + +class BatchRequest(BaseModel): + body: Dict[str, Any] + + custom_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["custom_id"]) + nullable_fields = set(["custom_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py new file mode 100644 index 00000000..ecf60d3c --- /dev/null +++ b/src/mistralai/client/models/builtinconnectors.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2d276ce938dc + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +BuiltInConnectors = Union[ + Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py new file mode 100644 index 00000000..cf2aa78a --- /dev/null +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: afd9cdc71834 + +from __future__ import annotations +from .inputs import Inputs, InputsTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ChatClassificationRequestTypedDict(TypedDict): + model: str + input: InputsTypedDict + r"""Chat to classify""" + + +class ChatClassificationRequest(BaseModel): + model: str + + input: Inputs + r"""Chat to classify""" diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py new file mode 100644 index 00000000..2c515f6e --- /dev/null +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7e6a512f6a04 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.client.types import BaseModel, UnrecognizedStr +from typing import Literal, Union +from typing_extensions import TypedDict + + +ChatCompletionChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + message: AssistantMessageTypedDict + finish_reason: ChatCompletionChoiceFinishReason + + +class ChatCompletionChoice(BaseModel): + index: int + + message: AssistantMessage + + finish_reason: ChatCompletionChoiceFinishReason diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py new file mode 100644 index 00000000..e871bd92 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -0,0 +1,225 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9979805d8c38 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatcompletionresponse.py b/src/mistralai/client/models/chatcompletionresponse.py new file mode 100644 index 00000000..7092bbc1 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionresponse.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 669d996b8e82 + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class ChatCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..b7b2bff1 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -0,0 +1,223 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 18cb2b2415d4 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionStreamRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionStreamRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py new file mode 100644 index 00000000..228e7d26 --- /dev/null +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 057aecb07275 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ChatModerationRequestInputs2TypedDict = TypeAliasType( + "ChatModerationRequestInputs2TypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatModerationRequestInputs2 = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputs1TypedDict = TypeAliasType( + "ChatModerationRequestInputs1TypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatModerationRequestInputs1 = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputs3TypedDict = TypeAliasType( + "ChatModerationRequestInputs3TypedDict", + Union[ + List[ChatModerationRequestInputs1TypedDict], + List[List[ChatModerationRequestInputs2TypedDict]], + ], +) +r"""Chat to classify""" + + +ChatModerationRequestInputs3 = TypeAliasType( + "ChatModerationRequestInputs3", + 
Union[List[ChatModerationRequestInputs1], List[List[ChatModerationRequestInputs2]]], +) +r"""Chat to classify""" + + +class ChatModerationRequestTypedDict(TypedDict): + inputs: ChatModerationRequestInputs3TypedDict + r"""Chat to classify""" + model: str + + +class ChatModerationRequest(BaseModel): + inputs: Annotated[ChatModerationRequestInputs3, pydantic.Field(alias="input")] + r"""Chat to classify""" + + model: str + + +try: + ChatModerationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/checkpoint.py b/src/mistralai/client/models/checkpoint.py new file mode 100644 index 00000000..c24e433e --- /dev/null +++ b/src/mistralai/client/models/checkpoint.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1a530d3674d8 + +from __future__ import annotations +from .metric import Metric, MetricTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CheckpointTypedDict(TypedDict): + metrics: MetricTypedDict + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + + +class Checkpoint(BaseModel): + metrics: Metric + r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + step_number: int + r"""The step number that the checkpoint was created at.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py new file mode 100644 index 00000000..25b69413 --- /dev/null +++ b/src/mistralai/client/models/classificationrequest.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6942fe3de24a + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ClassificationRequestInputsTypedDict = TypeAliasType( + "ClassificationRequestInputsTypedDict", Union[str, List[str]] +) +r"""Text to classify.""" + + +ClassificationRequestInputs = TypeAliasType( + "ClassificationRequestInputs", Union[str, List[str]] +) +r"""Text to classify.""" + + +class ClassificationRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" + inputs: ClassificationRequestInputsTypedDict + r"""Text to classify.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class ClassificationRequest(BaseModel): + model: str + r"""ID of the model to use.""" + + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Text to classify.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["metadata"]) + nullable_fields = set(["metadata"]) + serialized = handler(self) + m = {} + + for n, f 
in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassificationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classificationresponse.py b/src/mistralai/client/models/classificationresponse.py new file mode 100644 index 00000000..d2f09f43 --- /dev/null +++ b/src/mistralai/client/models/classificationresponse.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eaf279db1109 + +from __future__ import annotations +from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, +) +from mistralai.client.types import BaseModel +from typing import Dict, List +from typing_extensions import TypedDict + + +class ClassificationResponseTypedDict(TypedDict): + id: str + model: str + results: List[Dict[str, ClassificationTargetResultTypedDict]] + + +class ClassificationResponse(BaseModel): + id: str + + model: str + + results: List[Dict[str, ClassificationTargetResult]] diff --git a/src/mistralai/client/models/classificationtargetresult.py b/src/mistralai/client/models/classificationtargetresult.py new file mode 100644 index 00000000..6c7d6231 --- /dev/null +++ b/src/mistralai/client/models/classificationtargetresult.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2445f12b2a57 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Dict +from typing_extensions import TypedDict + + +class ClassificationTargetResultTypedDict(TypedDict): + scores: Dict[str, float] + + +class ClassificationTargetResult(BaseModel): + scores: Dict[str, float] diff --git a/src/mistralai/client/models/classifierfinetunedmodel.py b/src/mistralai/client/models/classifierfinetunedmodel.py new file mode 100644 index 00000000..fbcf5892 --- /dev/null +++ b/src/mistralai/client/models/classifierfinetunedmodel.py @@ -0,0 +1,117 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5a9a7a0153c8 + +from __future__ import annotations +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ClassifierFineTunedModelTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FineTunedModelCapabilitiesTypedDict + job: str + classifier_targets: List[ClassifierTargetResultTypedDict] + object: Literal["model"] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: Literal["classifier"] + + +class ClassifierFineTunedModel(BaseModel): + id: str + + created: int + + 
owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: FineTunedModelCapabilities + + job: str + + classifier_targets: List[ClassifierTargetResult] + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="model_type"), + ] = "classifier" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["object", "name", "description", "max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierfinetuningjob.py b/src/mistralai/client/models/classifierfinetuningjob.py new file mode 100644 index 00000000..fb160cf8 --- /dev/null +++ b/src/mistralai/client/models/classifierfinetuningjob.py @@ -0,0 +1,201 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a244d5f2afc5 + +from __future__ import annotations +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierFineTuningJobStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] +r"""The current status of the fine-tuning job.""" + + +ClassifierFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobIntegration(BaseModel): + r"""A ClassifierFineTuningJobIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CLASSIFIER_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +ClassifierFineTuningJobIntegration = WandbIntegrationResult + + +class ClassifierFineTuningJobTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + status: ClassifierFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: ClassifierTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: Literal["job"] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobIntegrationTypedDict]] + ] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["classifier"] + r"""The type of job (`FT` for fine-tuning).""" + + +class ClassifierFineTuningJob(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + + status: ClassifierFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: ClassifierTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[ClassifierFineTuningJobIntegration]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" + r"""The type of job (`FT` for fine-tuning).""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierfinetuningjobdetails.py b/src/mistralai/client/models/classifierfinetuningjobdetails.py new file mode 100644 index 00000000..5d73f55e --- /dev/null +++ b/src/mistralai/client/models/classifierfinetuningjobdetails.py @@ -0,0 +1,197 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 75c5dee8df2e + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + +ClassifierFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobDetailsIntegration(BaseModel): + r"""A ClassifierFineTuningJobDetailsIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CLASSIFIER_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +ClassifierFineTuningJobDetailsIntegration = WandbIntegrationResult + + +class ClassifierFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: ClassifierFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetResultTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["classifier"] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class ClassifierFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: ClassifierFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetResult] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifiertarget.py b/src/mistralai/client/models/classifiertarget.py new file mode 100644 index 00000000..4d66d789 --- /dev/null +++ b/src/mistralai/client/models/classifiertarget.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2177d51d9dcf + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTargetTypedDict(TypedDict): + name: str + labels: List[str] + weight: NotRequired[float] + loss_function: NotRequired[Nullable[FTClassifierLossFunction]] + + +class ClassifierTarget(BaseModel): + name: str + + labels: List[str] + + weight: Optional[float] = 1 + + loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["weight", "loss_function"]) + nullable_fields = set(["loss_function"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifiertargetresult.py b/src/mistralai/client/models/classifiertargetresult.py new file mode 100644 index 00000000..8ce7c0ca --- /dev/null +++ b/src/mistralai/client/models/classifiertargetresult.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 19c343844888 + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ClassifierTargetResultTypedDict(TypedDict): + name: str + labels: List[str] + weight: float + loss_function: FTClassifierLossFunction + + +class ClassifierTargetResult(BaseModel): + name: str + + labels: List[str] + + weight: float + + loss_function: FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py new file mode 100644 index 00000000..14fa4926 --- /dev/null +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -0,0 +1,73 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4000b05e3b8d + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + 
"training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + ) + nullable_fields = set( + ["training_steps", "weight_decay", "warmup_fraction", "epochs", "seq_len"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py new file mode 100644 index 00000000..ce14265f --- /dev/null +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 950cd8f4ad49 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class CodeInterpreterToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["code_interpreter"] + + +class CodeInterpreterTool(BaseModel): + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[ + Literal["code_interpreter"], + AfterValidator(validate_const("code_interpreter")), + ], + pydantic.Field(alias="type"), + ] = "code_interpreter" + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CodeInterpreterTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py new file mode 100644 index 00000000..ab5cf5ff --- /dev/null +++ b/src/mistralai/client/models/completionargs.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3db008bcddca + +from __future__ import annotations +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .toolchoiceenum import ToolChoiceEnum +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionArgsTypedDict(TypedDict): + r"""White-listed arguments from the completion API""" + + stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] + presence_penalty: NotRequired[Nullable[float]] + frequency_penalty: NotRequired[Nullable[float]] + temperature: NotRequired[Nullable[float]] + top_p: NotRequired[Nullable[float]] + max_tokens: NotRequired[Nullable[int]] + random_seed: NotRequired[Nullable[int]] + prediction: NotRequired[Nullable[PredictionTypedDict]] + response_format: NotRequired[Nullable[ResponseFormatTypedDict]] + tool_choice: NotRequired[ToolChoiceEnum] + + +class CompletionArgs(BaseModel): + r"""White-listed arguments from the completion API""" + + stop: OptionalNullable[CompletionArgsStop] = UNSET + + presence_penalty: OptionalNullable[float] = UNSET + + frequency_penalty: OptionalNullable[float] = UNSET + + temperature: OptionalNullable[float] = UNSET + + top_p: OptionalNullable[float] = UNSET + + max_tokens: OptionalNullable[int] = UNSET + + random_seed: OptionalNullable[int] = UNSET + + prediction: OptionalNullable[Prediction] = UNSET + + response_format: OptionalNullable[ResponseFormat] = UNSET + + tool_choice: Optional[ToolChoiceEnum] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + 
"prediction", + "response_format", + "tool_choice", + ] + ) + nullable_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionargsstop.py b/src/mistralai/client/models/completionargsstop.py new file mode 100644 index 00000000..39c858e6 --- /dev/null +++ b/src/mistralai/client/models/completionargsstop.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f339214501d + +from __future__ import annotations +from typing import List, Union +from typing_extensions import TypeAliasType + + +CompletionArgsStopTypedDict = TypeAliasType( + "CompletionArgsStopTypedDict", Union[str, List[str]] +) + + +CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]]) diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py new file mode 100644 index 00000000..5fd6c173 --- /dev/null +++ b/src/mistralai/client/models/completionchunk.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d786b44926f4 + +from __future__ import annotations +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + + model: str + + choices: List[CompletionResponseStreamChoice] + + object: Optional[str] = None + + created: Optional[int] = None + + usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionevent.py b/src/mistralai/client/models/completionevent.py new file mode 100644 index 00000000..3b90ab0c --- /dev/null +++ b/src/mistralai/client/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c68817e7e190 + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk diff --git a/src/mistralai/client/models/completionfinetunedmodel.py b/src/mistralai/client/models/completionfinetunedmodel.py new file mode 100644 index 00000000..54a1c165 --- /dev/null +++ b/src/mistralai/client/models/completionfinetunedmodel.py @@ -0,0 +1,110 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f08c10d149f5 + +from __future__ import annotations +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class CompletionFineTunedModelTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FineTunedModelCapabilitiesTypedDict + job: str + object: Literal["model"] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: Literal["completion"] + + +class CompletionFineTunedModel(BaseModel): + id: str + + created: int + + owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: 
FineTunedModelCapabilities + + job: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="model_type"), + ] = "completion" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["object", "name", "description", "max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionfinetuningjob.py b/src/mistralai/client/models/completionfinetuningjob.py new file mode 100644 index 00000000..1bf0a730 --- /dev/null +++ b/src/mistralai/client/models/completionfinetuningjob.py @@ -0,0 +1,227 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c242237efe9b + +from __future__ import annotations +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionFineTuningJobStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] +r"""The current status of the fine-tuning job.""" + + +CompletionFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobIntegration(BaseModel): + r"""A CompletionFineTuningJobIntegration variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobIntegration = WandbIntegrationResult + + +CompletionFineTuningJobRepositoryTypedDict = GithubRepositoryTypedDict + + +class UnknownCompletionFineTuningJobRepository(BaseModel): + r"""A CompletionFineTuningJobRepository variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobRepository = GithubRepository + + +class CompletionFineTuningJobTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + status: CompletionFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: Literal["job"] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobIntegrationTypedDict]] + ] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["completion"] + r"""The type of job (`FT` for fine-tuning).""" + repositories: NotRequired[List[CompletionFineTuningJobRepositoryTypedDict]] + + +class CompletionFineTuningJob(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + + status: CompletionFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[CompletionFineTuningJobIntegration]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" + r"""The type of job (`FT` for fine-tuning).""" + + repositories: Optional[List[CompletionFineTuningJobRepository]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionfinetuningjobdetails.py b/src/mistralai/client/models/completionfinetuningjobdetails.py new file mode 100644 index 00000000..cb787021 --- /dev/null +++ b/src/mistralai/client/models/completionfinetuningjobdetails.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e8379265af48 + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + +CompletionFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobDetailsIntegration(BaseModel): + r"""A CompletionFineTuningJobDetailsIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobDetailsIntegration = WandbIntegrationResult + + +CompletionFineTuningJobDetailsRepositoryTypedDict = GithubRepositoryTypedDict + + +class UnknownCompletionFineTuningJobDetailsRepository(BaseModel): + r"""A CompletionFineTuningJobDetailsRepository variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobDetailsRepository = GithubRepository + + +class CompletionFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: CompletionFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["completion"] + repositories: NotRequired[List[CompletionFineTuningJobDetailsRepositoryTypedDict]] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class CompletionFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: CompletionFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[CompletionFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" + + repositories: Optional[List[CompletionFineTuningJobDetailsRepository]] = None + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..a52ae892 --- /dev/null +++ b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5969a6bc07f3 + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from pydantic import model_serializer +from typing import Literal, Union +from typing_extensions import TypedDict + + +CompletionResponseStreamChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + + delta: DeltaMessage + + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py new file mode 100644 index 00000000..ca50a7ad --- /dev/null +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: be202ea0d5a6 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] + + +class CompletionTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) + nullable_fields = set( + [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py new file mode 100644 index 00000000..e3de7591 
--- /dev/null +++ b/src/mistralai/client/models/contentchunk.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c007f5ee0325 + +from __future__ import annotations +from .audiochunk import AudioChunk, AudioChunkTypedDict +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + ReferenceChunkTypedDict, + FileChunkTypedDict, + AudioChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ], +) + + +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "document_url": DocumentURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, + "file": FileChunk, + "thinking": ThinkChunk, + "input_audio": AudioChunk, +} + + +ContentChunk = Annotated[ + Union[ + ImageURLChunk, + DocumentURLChunk, + TextChunk, + ReferenceChunk, + FileChunk, + ThinkChunk, + AudioChunk, + UnknownContentChunk, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), +] diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py new file mode 100644 index 00000000..386714fd --- /dev/null +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81ce529e0865 + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendRequestTypedDict(TypedDict): + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] + + +class ConversationAppendRequest(BaseModel): + inputs: Optional[ConversationInputs] = None + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): 
+ k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py b/src/mistralai/client/models/conversationappendstreamrequest.py new file mode 100644 index 00000000..32f6b148 --- /dev/null +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 27ada745e6ad + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendStreamRequestTypedDict(TypedDict): + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] + + +class ConversationAppendStreamRequest(BaseModel): + inputs: Optional[ConversationInputs] = 
None + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py new file mode 100644 index 00000000..17812983 --- /dev/null +++ b/src/mistralai/client/models/conversationevents.py @@ -0,0 +1,114 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8c8b08d853f6 + +from __future__ import annotations +from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, +) +from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict +from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict +from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict +from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict +from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict +from .ssetypes import SSETypes +from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventTypedDict, +) +from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ConversationEventsDataTypedDict = TypeAliasType( + "ConversationEventsDataTypedDict", + Union[ + ResponseStartedEventTypedDict, + ResponseDoneEventTypedDict, + ResponseErrorEventTypedDict, + ToolExecutionDeltaEventTypedDict, + ToolExecutionDoneEventTypedDict, + AgentHandoffStartedEventTypedDict, + AgentHandoffDoneEventTypedDict, + ToolExecutionStartedEventTypedDict, + MessageOutputEventTypedDict, + FunctionCallEventTypedDict, + ], +) + + +class UnknownConversationEventsData(BaseModel): + r"""A ConversationEventsData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONVERSATION_EVENTS_DATA_VARIANTS: dict[str, Any] = { + "agent.handoff.done": AgentHandoffDoneEvent, + "agent.handoff.started": AgentHandoffStartedEvent, + "conversation.response.done": ResponseDoneEvent, + "conversation.response.error": ResponseErrorEvent, + "conversation.response.started": ResponseStartedEvent, + "function.call.delta": FunctionCallEvent, + "message.output.delta": MessageOutputEvent, + "tool.execution.delta": ToolExecutionDeltaEvent, + "tool.execution.done": ToolExecutionDoneEvent, + "tool.execution.started": ToolExecutionStartedEvent, +} + + +ConversationEventsData = Annotated[ + Union[ + AgentHandoffDoneEvent, + AgentHandoffStartedEvent, + ResponseDoneEvent, + ResponseErrorEvent, + ResponseStartedEvent, + FunctionCallEvent, + MessageOutputEvent, + ToolExecutionDeltaEvent, + ToolExecutionDoneEvent, + ToolExecutionStartedEvent, + UnknownConversationEventsData, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONVERSATION_EVENTS_DATA_VARIANTS, + unknown_cls=UnknownConversationEventsData, + union_name="ConversationEventsData", + ) + ), +] + + +class ConversationEventsTypedDict(TypedDict): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + data: ConversationEventsDataTypedDict + + +class ConversationEvents(BaseModel): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + + data: ConversationEventsData diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py new file mode 100644 index 00000000..ceef115b --- /dev/null +++ b/src/mistralai/client/models/conversationhistory.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60a51ff1682b + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +EntryTypedDict = TypeAliasType( + "EntryTypedDict", + Union[ + FunctionResultEntryTypedDict, + MessageInputEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + ], +) + + +Entry = TypeAliasType( + "Entry", + Union[ + FunctionResultEntry, + MessageInputEntry, + MessageOutputEntry, + AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, + ], +) + + +class ConversationHistoryTypedDict(TypedDict): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + entries: List[EntryTypedDict] + object: Literal["conversation.history"] + + +class ConversationHistory(BaseModel): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + + entries: List[Entry] + + object: Annotated[ + Annotated[ + Optional[Literal["conversation.history"]], + AfterValidator(validate_const("conversation.history")), + ], + pydantic.Field(alias="object"), + ] = "conversation.history" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = 
set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationHistory.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationinputs.py b/src/mistralai/client/models/conversationinputs.py new file mode 100644 index 00000000..7ce3ffc3 --- /dev/null +++ b/src/mistralai/client/models/conversationinputs.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 711b769f2c40 + +from __future__ import annotations +from .inputentries import InputEntries, InputEntriesTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +ConversationInputsTypedDict = TypeAliasType( + "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]] +) + + +ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]]) diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py new file mode 100644 index 00000000..84664b62 --- /dev/null +++ b/src/mistralai/client/models/conversationmessages.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 011c39501c26 + +from __future__ import annotations +from .messageentries import MessageEntries, MessageEntriesTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ConversationMessagesTypedDict(TypedDict): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + messages: List[MessageEntriesTypedDict] + object: Literal["conversation.messages"] + + +class ConversationMessages(BaseModel): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + + messages: List[MessageEntries] + + object: Annotated[ + Annotated[ + Optional[Literal["conversation.messages"]], + AfterValidator(validate_const("conversation.messages")), + ], + pydantic.Field(alias="object"), + ] = "conversation.messages" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationMessages.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py new file mode 100644 index 00000000..83d599eb --- /dev/null +++ b/src/mistralai/client/models/conversationrequest.py @@ -0,0 +1,163 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58e3ae67f149 + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import Field, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRequestToolTypedDict = TypeAliasType( + "ConversationRequestToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationRequestTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + ], + Field(discriminator="type"), +] + + +ConversationRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationRequestAgentVersion = TypeAliasType( + "ConversationRequestAgentVersion", Union[str, int] +) + + +class ConversationRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + 
handoff_execution: NotRequired[Nullable[ConversationRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ConversationRequestToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[ConversationRequestAgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationRequestHandoffExecution] = UNSET + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[ConversationRequestTool]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[ConversationRequestAgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py new file mode 100644 index 00000000..f6c10969 --- /dev/null +++ b/src/mistralai/client/models/conversationresponse.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ad7a8472c7bf + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +OutputTypedDict = TypeAliasType( + "OutputTypedDict", + Union[ + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + ], +) + + +Output = TypeAliasType( + "Output", + Union[MessageOutputEntry, AgentHandoffEntry, ToolExecutionEntry, FunctionCallEntry], +) + + +class ConversationResponseTypedDict(TypedDict): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + outputs: 
List[OutputTypedDict] + usage: ConversationUsageInfoTypedDict + object: Literal["conversation.response"] + + +class ConversationResponse(BaseModel): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + + outputs: List[Output] + + usage: ConversationUsageInfo + + object: Annotated[ + Annotated[ + Optional[Literal["conversation.response"]], + AfterValidator(validate_const("conversation.response")), + ], + pydantic.Field(alias="object"), + ] = "conversation.response" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py new file mode 100644 index 00000000..7ae16aff --- /dev/null +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -0,0 +1,112 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 681d90d50514 + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartRequestAgentVersion = TypeAliasType( + "ConversationRestartRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + +class ConversationRestartRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + + inputs: Optional[ConversationInputs] = None + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py new file mode 100644 index 00000000..0e247261 --- /dev/null +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 521c2b5bfb2b + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartStreamRequestAgentVersion = TypeAliasType( + "ConversationRestartStreamRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartStreamRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + +class ConversationRestartStreamRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + + inputs: Optional[ConversationInputs] = None + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( + UNSET + ) + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py new file mode 100644 index 00000000..a20dccae --- /dev/null +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -0,0 +1,165 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58d633507527 + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import Field, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationStreamRequestToolTypedDict = TypeAliasType( + "ConversationStreamRequestToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationStreamRequestTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + ], + Field(discriminator="type"), +] + + +ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationStreamRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationStreamRequestAgentVersion = TypeAliasType( + "ConversationStreamRequestAgentVersion", Union[str, int] +) + + +class ConversationStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] 
+ store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ConversationStreamRequestToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( + UNSET + ) + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[ConversationStreamRequestTool]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + serialized = 
handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationthinkchunk.py b/src/mistralai/client/models/conversationthinkchunk.py new file mode 100644 index 00000000..e0e172e3 --- /dev/null +++ b/src/mistralai/client/models/conversationthinkchunk.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77e59cde5c0f + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationThinkChunkThinkingTypedDict = TypeAliasType( + "ConversationThinkChunkThinkingTypedDict", + Union[TextChunkTypedDict, ToolReferenceChunkTypedDict], +) + + +ConversationThinkChunkThinking = TypeAliasType( + "ConversationThinkChunkThinking", Union[TextChunk, ToolReferenceChunk] +) + + +class ConversationThinkChunkTypedDict(TypedDict): + thinking: List[ConversationThinkChunkThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + + +class ConversationThinkChunk(BaseModel): + thinking: List[ConversationThinkChunkThinking] + + type: Annotated[ + Annotated[ + Optional[Literal["thinking"]], 
AfterValidator(validate_const("thinking")) + ], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py new file mode 100644 index 00000000..1e80f89e --- /dev/null +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -0,0 +1,67 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6685e3b50b50 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ConversationUsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + connector_tokens: NotRequired[Nullable[int]] + connectors: NotRequired[Nullable[Dict[str, int]]] + + +class ConversationUsageInfo(BaseModel): + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + connector_tokens: OptionalNullable[int] = UNSET + + connectors: OptionalNullable[Dict[str, int]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + ) + nullable_fields = 
set(["connector_tokens", "connectors"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createagentrequest.py b/src/mistralai/client/models/createagentrequest.py new file mode 100644 index 00000000..54b09880 --- /dev/null +++ b/src/mistralai/client/models/createagentrequest.py @@ -0,0 +1,122 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 442629bd914b + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import Field, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +CreateAgentRequestToolTypedDict = TypeAliasType( + "CreateAgentRequestToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + 
DocumentLibraryToolTypedDict, + ], +) + + +CreateAgentRequestTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + ], + Field(discriminator="type"), +] + + +class CreateAgentRequestTypedDict(TypedDict): + model: str + name: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[CreateAgentRequestToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + version_message: NotRequired[Nullable[str]] + + +class CreateAgentRequest(BaseModel): + model: str + + name: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[CreateAgentRequestTool]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + version_message: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "version_message", + ] + ) + nullable_fields = set( + ["instructions", "description", "handoffs", "metadata", "version_message"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createbatchjobrequest.py b/src/mistralai/client/models/createbatchjobrequest.py new file mode 100644 index 00000000..9a901fef --- /dev/null +++ b/src/mistralai/client/models/createbatchjobrequest.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 56e24cd24e98 + +from __future__ import annotations +from .apiendpoint import APIEndpoint +from .batchrequest import BatchRequest, BatchRequestTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CreateBatchJobRequestTypedDict(TypedDict): + endpoint: APIEndpoint + input_files: NotRequired[Nullable[List[str]]] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + requests: NotRequired[Nullable[List[BatchRequestTypedDict]]] + model: NotRequired[Nullable[str]] + r"""The model to be used for batch inference.""" + agent_id: NotRequired[Nullable[str]] + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" + metadata: NotRequired[Nullable[Dict[str, str]]] + r"""The metadata of your choice to be associated with the batch inference job.""" + timeout_hours: NotRequired[int] + r"""The timeout in hours for the batch inference job.""" + + +class CreateBatchJobRequest(BaseModel): + endpoint: APIEndpoint + + input_files: OptionalNullable[List[str]] = UNSET + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + + requests: OptionalNullable[List[BatchRequest]] = UNSET + + model: OptionalNullable[str] = UNSET + r"""The model to be used for batch inference.""" + + agent_id: OptionalNullable[str] = UNSET + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" + + metadata: OptionalNullable[Dict[str, str]] = UNSET + r"""The metadata of your choice to be associated with the batch inference job.""" + + timeout_hours: Optional[int] = 24 + r"""The timeout in hours for the batch inference job.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "input_files", + "requests", + "model", + "agent_id", + "metadata", + "timeout_hours", + ] + ) + nullable_fields = set( + ["input_files", "requests", "model", "agent_id", "metadata"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createfileresponse.py b/src/mistralai/client/models/createfileresponse.py new file mode 100644 index 00000000..76821280 --- /dev/null +++ b/src/mistralai/client/models/createfileresponse.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fea5e4832dcc + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class CreateFileResponseTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class CreateFileResponse(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CreateFileResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/createfinetuningjobrequest.py b/src/mistralai/client/models/createfinetuningjobrequest.py new file mode 100644 index 00000000..e328d944 --- /dev/null +++ b/src/mistralai/client/models/createfinetuningjobrequest.py @@ -0,0 +1,146 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c60d2a45d66b + +from __future__ import annotations +from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .finetuneablemodeltype import FineTuneableModelType +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +CreateFineTuningJobRequestIntegrationTypedDict = WandbIntegrationTypedDict + + +CreateFineTuningJobRequestIntegration = WandbIntegration + + +HyperparametersTypedDict = TypeAliasType( + "HyperparametersTypedDict", + Union[ClassifierTrainingParametersTypedDict, CompletionTrainingParametersTypedDict], +) + + +Hyperparameters = TypeAliasType( + "Hyperparameters", 
Union[ClassifierTrainingParameters, CompletionTrainingParameters] +) + + +CreateFineTuningJobRequestRepositoryTypedDict = GithubRepositoryInTypedDict + + +CreateFineTuningJobRequestRepository = GithubRepositoryIn + + +class CreateFineTuningJobRequestTypedDict(TypedDict): + model: str + hyperparameters: HyperparametersTypedDict + training_files: NotRequired[List[TrainingFileTypedDict]] + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + suffix: NotRequired[Nullable[str]] + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: NotRequired[ + Nullable[List[CreateFineTuningJobRequestIntegrationTypedDict]] + ] + r"""A list of integrations to enable for your fine-tuning job.""" + auto_start: NotRequired[bool] + r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: NotRequired[float] + job_type: NotRequired[Nullable[FineTuneableModelType]] + repositories: NotRequired[ + Nullable[List[CreateFineTuningJobRequestRepositoryTypedDict]] + ] + classifier_targets: NotRequired[Nullable[List[ClassifierTargetTypedDict]]] + + +class CreateFineTuningJobRequest(BaseModel): + model: str + + hyperparameters: Hyperparameters + + training_files: Optional[List[TrainingFile]] = None + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. 
These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + + suffix: OptionalNullable[str] = UNSET + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + + integrations: OptionalNullable[List[CreateFineTuningJobRequestIntegration]] = UNSET + r"""A list of integrations to enable for your fine-tuning job.""" + + auto_start: Optional[bool] = None + r"""This field will be required in a future release.""" + + invalid_sample_skip_percentage: Optional[float] = 0 + + job_type: OptionalNullable[FineTuneableModelType] = UNSET + + repositories: OptionalNullable[List[CreateFineTuningJobRequestRepository]] = UNSET + + classifier_targets: OptionalNullable[List[ClassifierTarget]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "training_files", + "validation_files", + "suffix", + "integrations", + "auto_start", + "invalid_sample_skip_percentage", + "job_type", + "repositories", + "classifier_targets", + ] + ) + nullable_fields = set( + [ + "validation_files", + "suffix", + "integrations", + "job_type", + "repositories", + "classifier_targets", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createlibraryrequest.py b/src/mistralai/client/models/createlibraryrequest.py new file mode 100644 index 00000000..58874e01 --- /dev/null +++ 
b/src/mistralai/client/models/createlibraryrequest.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1c489bec2f53 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class CreateLibraryRequestTypedDict(TypedDict): + name: str + description: NotRequired[Nullable[str]] + chunk_size: NotRequired[Nullable[int]] + + +class CreateLibraryRequest(BaseModel): + name: str + + description: OptionalNullable[str] = UNSET + + chunk_size: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "chunk_size"]) + nullable_fields = set(["description", "chunk_size"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py new file mode 100644 index 00000000..199614f5 --- /dev/null +++ b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 767aba526e43 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to delete.""" + + +class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to delete.""" diff --git a/src/mistralai/client/models/deletefileresponse.py b/src/mistralai/client/models/deletefileresponse.py new file mode 100644 index 00000000..ffd0e0d0 --- /dev/null +++ b/src/mistralai/client/models/deletefileresponse.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3ee464763a32 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class DeleteFileResponseTypedDict(TypedDict): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + + +class DeleteFileResponse(BaseModel): + id: str + r"""The ID of the deleted file.""" + + object: str + r"""The object type that was deleted""" + + deleted: bool + r"""The deletion status.""" diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py new file mode 100644 index 00000000..fa0c20a4 --- /dev/null +++ b/src/mistralai/client/models/deletemodelout.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ef6a1671c739 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class DeleteModelOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted model.""" + object: NotRequired[str] + r"""The object type that was deleted""" + deleted: NotRequired[bool] + r"""The deletion status""" + + +class DeleteModelOut(BaseModel): + id: str + r"""The ID of the deleted model.""" + + object: Optional[str] = "model" + r"""The object type that was deleted""" + + deleted: Optional[bool] = True + r"""The deletion status""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "deleted"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py new file mode 100644 index 00000000..d9fa230e --- /dev/null +++ b/src/mistralai/client/models/deltamessage.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 68f53d67a140 + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + + +class DeltaMessage(BaseModel): + role: OptionalNullable[str] = UNSET + + content: OptionalNullable[DeltaMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/document.py b/src/mistralai/client/models/document.py new file mode 100644 index 00000000..fcc5bca5 --- /dev/null +++ b/src/mistralai/client/models/document.py @@ -0,0 +1,131 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fbbf7428328c + +from __future__ import annotations +from .processstatus import ProcessStatus +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class DocumentTypedDict(TypedDict): + id: str + library_id: str + hash: Nullable[str] + mime_type: Nullable[str] + extension: Nullable[str] + size: Nullable[int] + name: str + created_at: datetime + process_status: ProcessStatus + uploaded_by_id: Nullable[str] + uploaded_by_type: str + processing_status: str + tokens_processing_total: int + summary: NotRequired[Nullable[str]] + last_processed_at: NotRequired[Nullable[datetime]] + number_of_pages: NotRequired[Nullable[int]] + tokens_processing_main_content: NotRequired[Nullable[int]] + tokens_processing_summary: NotRequired[Nullable[int]] + url: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, Any]]] + + +class Document(BaseModel): + id: str + + library_id: str + + hash: Nullable[str] + + mime_type: Nullable[str] + + extension: Nullable[str] + + size: Nullable[int] + + name: str + + created_at: datetime + + process_status: ProcessStatus + + uploaded_by_id: Nullable[str] + + uploaded_by_type: str + + processing_status: str + + tokens_processing_total: int + + summary: OptionalNullable[str] = UNSET + + last_processed_at: OptionalNullable[datetime] = UNSET + + number_of_pages: OptionalNullable[int] = UNSET + + tokens_processing_main_content: OptionalNullable[int] = UNSET + + tokens_processing_summary: OptionalNullable[int] = UNSET + + url: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "summary", 
+ "last_processed_at", + "number_of_pages", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + ) + nullable_fields = set( + [ + "hash", + "mime_type", + "extension", + "size", + "summary", + "last_processed_at", + "number_of_pages", + "uploaded_by_id", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py new file mode 100644 index 00000000..642c3202 --- /dev/null +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3eb3c218f457 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class DocumentLibraryToolTypedDict(TypedDict): + library_ids: List[str] + r"""Ids of the library in which to search.""" + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["document_library"] + + +class DocumentLibraryTool(BaseModel): + library_ids: List[str] + r"""Ids of the library in which to search.""" + + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[ + Literal["document_library"], + AfterValidator(validate_const("document_library")), + ], + pydantic.Field(alias="type"), + ] = "document_library" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + DocumentLibraryTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/documenttextcontent.py b/src/mistralai/client/models/documenttextcontent.py new file mode 100644 index 00000000..b6904cb4 --- /dev/null +++ 
b/src/mistralai/client/models/documenttextcontent.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e730005e44cb + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class DocumentTextContentTypedDict(TypedDict): + text: str + + +class DocumentTextContent(BaseModel): + text: str diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py new file mode 100644 index 00000000..43444d98 --- /dev/null +++ b/src/mistralai/client/models/documenturlchunk.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4309807f6048 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + type: Literal["document_url"] + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + + +class DocumentURLChunk(BaseModel): + document_url: str + + type: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "document_name"]) + nullable_fields = set(["document_name"]) + serialized = handler(self) + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + DocumentURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/embeddingdtype.py b/src/mistralai/client/models/embeddingdtype.py new file mode 100644 index 00000000..732c4ebe --- /dev/null +++ b/src/mistralai/client/models/embeddingdtype.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77f9526a78df + +from __future__ import annotations +from typing import Literal + + +EmbeddingDtype = Literal[ + "float", + "int8", + "uint8", + "binary", + "ubinary", +] diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py new file mode 100644 index 00000000..5fa2d2f6 --- /dev/null +++ b/src/mistralai/client/models/embeddingrequest.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eadbe3f9040c + +from __future__ import annotations +from .embeddingdtype import EmbeddingDtype +from .encodingformat import EncodingFormat +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +EmbeddingRequestInputsTypedDict = TypeAliasType( + "EmbeddingRequestInputsTypedDict", Union[str, List[str]] +) +r"""Text to embed.""" + + +EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) +r"""Text to embed.""" + + +class EmbeddingRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" + inputs: EmbeddingRequestInputsTypedDict + r"""Text to embed.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + output_dimension: NotRequired[Nullable[int]] + r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" + output_dtype: NotRequired[EmbeddingDtype] + encoding_format: NotRequired[EncodingFormat] + + +class EmbeddingRequest(BaseModel): + model: str + r"""ID of the model to use.""" + + inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] + r"""Text to embed.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + output_dimension: OptionalNullable[int] = UNSET + r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" + + output_dtype: Optional[EmbeddingDtype] = None + + encoding_format: Optional[EncodingFormat] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["metadata", "output_dimension", "output_dtype", "encoding_format"] + ) + nullable_fields = set(["metadata", "output_dimension"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + EmbeddingRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/embeddingresponse.py b/src/mistralai/client/models/embeddingresponse.py new file mode 100644 index 00000000..6ffd6894 --- /dev/null +++ b/src/mistralai/client/models/embeddingresponse.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f7d790e84b65 + +from __future__ import annotations +from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class EmbeddingResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + data: List[EmbeddingResponseDataTypedDict] + + +class EmbeddingResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + data: List[EmbeddingResponseData] diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py new file mode 100644 index 00000000..098cfae0 --- /dev/null +++ b/src/mistralai/client/models/embeddingresponsedata.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d6ead6f3803 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class EmbeddingResponseDataTypedDict(TypedDict): + object: NotRequired[str] + embedding: NotRequired[List[float]] + index: NotRequired[int] + + +class EmbeddingResponseData(BaseModel): + object: Optional[str] = None + + embedding: Optional[List[float]] = None + + index: Optional[int] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "embedding", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/encodingformat.py 
b/src/mistralai/client/models/encodingformat.py new file mode 100644 index 00000000..4a39d029 --- /dev/null +++ b/src/mistralai/client/models/encodingformat.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b51ec296cc92 + +from __future__ import annotations +from typing import Literal + + +EncodingFormat = Literal[ + "float", + "base64", +] diff --git a/src/mistralai/client/models/entitytype.py b/src/mistralai/client/models/entitytype.py new file mode 100644 index 00000000..56d82cbe --- /dev/null +++ b/src/mistralai/client/models/entitytype.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 62d6a6a13288 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +EntityType = Union[ + Literal[ + "User", + "Workspace", + "Org", + ], + UnrecognizedStr, +] +r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/client/models/event.py b/src/mistralai/client/models/event.py new file mode 100644 index 00000000..c40ae2b1 --- /dev/null +++ b/src/mistralai/client/models/event.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e5a68ac2dd57 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class EventTypedDict(TypedDict): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: NotRequired[Nullable[Dict[str, Any]]] + + +class Event(BaseModel): + name: str + r"""The name of the event.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + data: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data"]) + nullable_fields = set(["data"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py new file mode 100644 index 00000000..1b0ea1d4 --- /dev/null +++ b/src/mistralai/client/models/file.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f972c39edfcf + +from __future__ import annotations +import io +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from pydantic import model_serializer +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class File(BaseModel): + file_name: Annotated[ + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["contentType"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py new file mode 100644 index 00000000..5c8d2646 --- /dev/null +++ b/src/mistralai/client/models/filechunk.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ff3c2d33ab1e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + type: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + FileChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/filepurpose.py b/src/mistralai/client/models/filepurpose.py new file mode 100644 index 00000000..49a5568f --- /dev/null +++ b/src/mistralai/client/models/filepurpose.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a11e7f9f2d45 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +FilePurpose = Union[ + Literal[ + "fine-tune", + "batch", + "ocr", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/files_api_routes_delete_fileop.py b/src/mistralai/client/models/files_api_routes_delete_fileop.py new file mode 100644 index 00000000..eaba274b --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_delete_fileop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f385cc6138f + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDeleteFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_download_fileop.py b/src/mistralai/client/models/files_api_routes_download_fileop.py new file mode 100644 index 00000000..83de8e73 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_download_fileop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8184ee3577c3 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDownloadFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py new file mode 100644 index 00000000..64cd6ac5 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0a1a18c6431e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): + file_id: str + expiry: NotRequired[int] + r"""Number of hours before the url becomes invalid. Defaults to 24h""" + + +class FilesAPIRoutesGetSignedURLRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + expiry: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 24 + r"""Number of hours before the url becomes invalid. 
Defaults to 24h""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["expiry"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/files_api_routes_list_filesop.py b/src/mistralai/client/models/files_api_routes_list_filesop.py new file mode 100644 index 00000000..b03e2f88 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_list_filesop.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b2e92f2a29b4 + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + include_total: NotRequired[bool] + sample_type: NotRequired[Nullable[List[SampleType]]] + source: NotRequired[Nullable[List[Source]]] + search: NotRequired[Nullable[str]] + purpose: NotRequired[Nullable[FilePurpose]] + mimetypes: NotRequired[Nullable[List[str]]] + + +class FilesAPIRoutesListFilesRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + include_total: Annotated[ + Optional[bool], + 
FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = True + + sample_type: Annotated[ + OptionalNullable[List[SampleType]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + source: Annotated[ + OptionalNullable[List[Source]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + purpose: Annotated[ + OptionalNullable[FilePurpose], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + mimetypes: Annotated[ + OptionalNullable[List[str]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "include_total", + "sample_type", + "source", + "search", + "purpose", + "mimetypes", + ] + ) + nullable_fields = set( + ["sample_type", "source", "search", "purpose", "mimetypes"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py new file mode 100644 index 00000000..5f8de05f --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5d5dbb8d5f7a + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesRetrieveFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py new file mode 100644 index 00000000..54ff4e49 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f13b84de6fa7 + +from __future__ import annotations +from .file import File, FileTypedDict +from .filepurpose import FilePurpose +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class MultiPartBodyParamsTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + purpose: NotRequired[FilePurpose] + + +class MultiPartBodyParams(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["purpose"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py new file mode 100644 index 00000000..e99066a9 --- /dev/null +++ b/src/mistralai/client/models/fileschema.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 19cde41ca32a + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileSchemaTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class FileSchema(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FileSchema.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py new file mode 100644 index 00000000..ea877213 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionrequest.py @@ -0,0 +1,130 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf3558adc3ab + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/fimcompletionresponse.py b/src/mistralai/client/models/fimcompletionresponse.py new file mode 100644 index 00000000..1345a116 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionresponse.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b860d2ba771e + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class FIMCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..e80efc09 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionstreamrequest.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1d1ee09f1913 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/finetuneablemodeltype.py b/src/mistralai/client/models/finetuneablemodeltype.py new file mode 100644 index 00000000..7b924bd7 --- /dev/null +++ b/src/mistralai/client/models/finetuneablemodeltype.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 05e097395df3 + +from __future__ import annotations +from typing import Literal + + +FineTuneableModelType = Literal[ + "completion", + "classifier", +] diff --git a/src/mistralai/client/models/finetunedmodelcapabilities.py b/src/mistralai/client/models/finetunedmodelcapabilities.py new file mode 100644 index 00000000..2f4cca0b --- /dev/null +++ b/src/mistralai/client/models/finetunedmodelcapabilities.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 475c805eab95 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class FineTunedModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + classification: NotRequired[bool] + + +class FineTunedModelCapabilities(BaseModel): + completion_chat: Optional[bool] = True + + completion_fim: Optional[bool] = False + + function_calling: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + classification: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "completion_chat", + "completion_fim", + "function_calling", + "fine_tuning", + "classification", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py new file mode 100644 index 00000000..ccb0f21b --- /dev/null +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d21e2a36ab1f + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +FTClassifierLossFunction = Union[ + Literal[ + "single_class", + "multi_class", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py new file mode 100644 index 00000000..2c26ff2f --- /dev/null +++ b/src/mistralai/client/models/ftmodelcard.py @@ -0,0 +1,132 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c4f15eed2ca2 + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FTModelCardTypedDict(TypedDict): + r"""Extra fields for fine-tuned models.""" + + id: str + capabilities: ModelCapabilitiesTypedDict + job: str + root: str + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] + default_model_temperature: NotRequired[Nullable[float]] + type: Literal["fine-tuned"] + archived: NotRequired[bool] + + +class FTModelCard(BaseModel): + r"""Extra fields for fine-tuned models.""" + + id: str + + capabilities: ModelCapabilities + + job: str + + root: str + + object: 
Optional[str] = "model" + + created: Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + type: Annotated[ + Annotated[Literal["fine-tuned"], AfterValidator(validate_const("fine-tuned"))], + pydantic.Field(alias="type"), + ] = "fine-tuned" + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + "archived", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FTModelCard.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py new file mode 100644 index 00000000..1da1dcc9 --- /dev/null +++ b/src/mistralai/client/models/function.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 32275a9d8fee + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + strict: NotRequired[bool] + + +class Function(BaseModel): + name: str + + parameters: Dict[str, Any] + + description: Optional[str] = None + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/functioncall.py b/src/mistralai/client/models/functioncall.py new file mode 100644 index 00000000..527c3ad4 --- /dev/null +++ b/src/mistralai/client/models/functioncall.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 393fca552632 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) + + +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + + arguments: Arguments diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py new file mode 100644 index 00000000..d05fad85 --- /dev/null +++ b/src/mistralai/client/models/functioncallentry.py @@ -0,0 +1,124 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cd058446c0aa + +from __future__ import annotations +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +FunctionCallEntryConfirmationStatus = Union[ + Literal[ + "pending", + "allowed", + "denied", + ], + UnrecognizedStr, +] + + +class FunctionCallEntryTypedDict(TypedDict): + tool_call_id: str + name: str + arguments: FunctionCallEntryArgumentsTypedDict + object: Literal["entry"] + type: Literal["function.call"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: 
NotRequired[Nullable[str]] + id: NotRequired[str] + confirmation_status: NotRequired[Nullable[FunctionCallEntryConfirmationStatus]] + + +class FunctionCallEntry(BaseModel): + tool_call_id: str + + name: str + + arguments: FunctionCallEntryArguments + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["function.call"]], + AfterValidator(validate_const("function.call")), + ], + pydantic.Field(alias="type"), + ] = "function.call" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + id: Optional[str] = None + + confirmation_status: OptionalNullable[FunctionCallEntryConfirmationStatus] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "confirmation_status", + ] + ) + nullable_fields = set( + ["completed_at", "agent_id", "model", "confirmation_status"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionCallEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functioncallentryarguments.py b/src/mistralai/client/models/functioncallentryarguments.py new file mode 100644 index 00000000..afe81b24 --- /dev/null +++ b/src/mistralai/client/models/functioncallentryarguments.py @@ -0,0 +1,16 @@ +"""Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3df3767a7b93 + +from __future__ import annotations +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType + + +FunctionCallEntryArgumentsTypedDict = TypeAliasType( + "FunctionCallEntryArgumentsTypedDict", Union[Dict[str, Any], str] +) + + +FunctionCallEntryArguments = TypeAliasType( + "FunctionCallEntryArguments", Union[Dict[str, Any], str] +) diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py new file mode 100644 index 00000000..849eed76 --- /dev/null +++ b/src/mistralai/client/models/functioncallevent.py @@ -0,0 +1,103 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 23b120b8f122 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +FunctionCallEventConfirmationStatus = Union[ + Literal[ + "pending", + "allowed", + "denied", + ], + UnrecognizedStr, +] + + +class FunctionCallEventTypedDict(TypedDict): + id: str + name: str + tool_call_id: str + arguments: str + type: Literal["function.call.delta"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + confirmation_status: NotRequired[Nullable[FunctionCallEventConfirmationStatus]] + + +class FunctionCallEvent(BaseModel): + id: str + + name: str + + tool_call_id: str + + arguments: str + + type: Annotated[ + Annotated[ + 
Literal["function.call.delta"], + AfterValidator(validate_const("function.call.delta")), + ], + pydantic.Field(alias="type"), + ] = "function.call.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + confirmation_status: OptionalNullable[FunctionCallEventConfirmationStatus] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["created_at", "output_index", "model", "agent_id", "confirmation_status"] + ) + nullable_fields = set(["model", "agent_id", "confirmation_status"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionCallEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functionname.py b/src/mistralai/client/models/functionname.py new file mode 100644 index 00000000..07d98a0e --- /dev/null +++ b/src/mistralai/client/models/functionname.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 000acafdb0c0 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py new file mode 100644 index 00000000..01e2e36f --- /dev/null +++ b/src/mistralai/client/models/functionresultentry.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 213df39bd5e6 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FunctionResultEntryTypedDict(TypedDict): + tool_call_id: str + result: str + object: Literal["entry"] + type: Literal["function.result"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionResultEntry(BaseModel): + tool_call_id: str + + result: str + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["function.result"]], + AfterValidator(validate_const("function.result")), + ], + pydantic.Field(alias="type"), + ] = "function.result" + + created_at: 
Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "type", "created_at", "completed_at", "id"]) + nullable_fields = set(["completed_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionResultEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py new file mode 100644 index 00000000..eae87264 --- /dev/null +++ b/src/mistralai/client/models/functiontool.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2e9ef5800117 + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class FunctionToolTypedDict(TypedDict): + function: FunctionTypedDict + type: Literal["function"] + + +class FunctionTool(BaseModel): + function: Function + + type: Annotated[ + Annotated[Literal["function"], AfterValidator(validate_const("function"))], + pydantic.Field(alias="type"), + ] = "function" + + +try: + FunctionTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getfileresponse.py b/src/mistralai/client/models/getfileresponse.py new file mode 100644 index 00000000..f625c153 --- /dev/null +++ b/src/mistralai/client/models/getfileresponse.py @@ -0,0 +1,99 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81919086e371 + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetFileResponseTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + deleted: bool + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class GetFileResponse(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + deleted: bool + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + GetFileResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getsignedurlresponse.py b/src/mistralai/client/models/getsignedurlresponse.py new file mode 100644 index 00000000..4ba95894 --- /dev/null +++ b/src/mistralai/client/models/getsignedurlresponse.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cee4e4197372 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class GetSignedURLResponseTypedDict(TypedDict): + url: str + + +class GetSignedURLResponse(BaseModel): + url: str diff --git a/src/mistralai/client/models/githubrepository.py b/src/mistralai/client/models/githubrepository.py new file mode 100644 index 00000000..84b01078 --- /dev/null +++ b/src/mistralai/client/models/githubrepository.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4bc83ce18378 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GithubRepositoryTypedDict(TypedDict): + name: str + owner: str + commit_id: str + type: Literal["github"] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepository(BaseModel): + name: str + + owner: str + + commit_id: str + + type: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + GithubRepository.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py new file mode 100644 index 00000000..38bcc208 --- /dev/null +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eef26fbd2876 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GithubRepositoryInTypedDict(TypedDict): + name: str + owner: str + token: str + type: Literal["github"] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryIn(BaseModel): + name: str + + owner: str + + token: str + + type: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + GithubRepositoryIn.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/imagedetail.py b/src/mistralai/client/models/imagedetail.py new file mode 100644 index 00000000..1982d357 --- /dev/null +++ b/src/mistralai/client/models/imagedetail.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c1084b549abb + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py new file mode 100644 index 00000000..c1789b18 --- /dev/null +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e1532275faa0 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ImageGenerationToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["image_generation"] + + +class ImageGenerationTool(BaseModel): + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[ + Literal["image_generation"], + AfterValidator(validate_const("image_generation")), + ], + pydantic.Field(alias="type"), + ] = "image_generation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ImageGenerationTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py new file mode 100644 index 00000000..ac1030f5 --- /dev/null +++ b/src/mistralai/client/models/imageurl.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4bbf5881fbf + +from __future__ import annotations +from .imagedetail import ImageDetail +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[ImageDetail]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[ImageDetail] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py new file mode 100644 index 00000000..7134b46e --- /dev/null +++ b/src/mistralai/client/models/imageurlchunk.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 746fde62f637 + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + type: Annotated[ + Annotated[ + Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/inputentries.py b/src/mistralai/client/models/inputentries.py new file mode 100644 index 00000000..e2da5a80 --- /dev/null +++ b/src/mistralai/client/models/inputentries.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 44727997dacb + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +InputEntriesTypedDict = TypeAliasType( + "InputEntriesTypedDict", + Union[ + FunctionResultEntryTypedDict, + MessageInputEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + ], +) + + +InputEntries = TypeAliasType( + "InputEntries", + Union[ + FunctionResultEntry, + MessageInputEntry, + MessageOutputEntry, + AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, + ], +) diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py new file mode 100644 index 00000000..9ecd7f48 --- /dev/null +++ b/src/mistralai/client/models/inputs.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 84a8007518c7 + +from __future__ import annotations +from .instructrequest import InstructRequest, InstructRequestTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +InputsTypedDict = TypeAliasType( + "InputsTypedDict", Union[InstructRequestTypedDict, List[InstructRequestTypedDict]] +) +r"""Chat to classify""" + + +Inputs = TypeAliasType("Inputs", Union[InstructRequest, List[InstructRequest]]) +r"""Chat to classify""" diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py new file mode 100644 index 00000000..e5f9cccf --- /dev/null +++ b/src/mistralai/client/models/instructrequest.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d3ad9f896c7 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestMessageTypedDict = TypeAliasType( + "InstructRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestTypedDict(TypedDict): + messages: 
List[InstructRequestMessageTypedDict] + + +class InstructRequest(BaseModel): + messages: List[InstructRequestMessage] diff --git a/src/mistralai/client/models/jobmetadata.py b/src/mistralai/client/models/jobmetadata.py new file mode 100644 index 00000000..f6e96fa1 --- /dev/null +++ b/src/mistralai/client/models/jobmetadata.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cfbdde7fc0a2 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class JobMetadataTypedDict(TypedDict): + expected_duration_seconds: NotRequired[Nullable[int]] + cost: NotRequired[Nullable[float]] + cost_currency: NotRequired[Nullable[str]] + train_tokens_per_step: NotRequired[Nullable[int]] + train_tokens: NotRequired[Nullable[int]] + data_tokens: NotRequired[Nullable[int]] + estimated_start_time: NotRequired[Nullable[int]] + + +class JobMetadata(BaseModel): + expected_duration_seconds: OptionalNullable[int] = UNSET + + cost: OptionalNullable[float] = UNSET + + cost_currency: OptionalNullable[str] = UNSET + + train_tokens_per_step: OptionalNullable[int] = UNSET + + train_tokens: OptionalNullable[int] = UNSET + + data_tokens: OptionalNullable[int] = UNSET + + estimated_start_time: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) + serialized = handler(self) + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py new file mode 100644 index 00000000..de2e6347 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b56cb6c17c95 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py new file mode 100644 index 00000000..d779e1d9 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 36b5a6b3ceee + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): + job_id: str + inline: NotRequired[Nullable[bool]] + + +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + inline: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["inline"]) + nullable_fields = set(["inline"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py new file mode 100644 index 00000000..89ac3c93 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -0,0 +1,121 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d8f0af99c94d + +from __future__ import annotations +from .batchjobstatus import BatchJobStatus +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +OrderBy = Literal[ + "created", + "-created", +] + + +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + created_after: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + status: NotRequired[Nullable[List[BatchJobStatus]]] + order_by: NotRequired[OrderBy] + + +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + agent_id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + + status: Annotated[ + 
OptionalNullable[List[BatchJobStatus]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + order_by: Annotated[ + Optional[OrderBy], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "-created" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "model", + "agent_id", + "metadata", + "created_after", + "created_by_me", + "status", + "order_by", + ] + ) + nullable_fields = set( + ["model", "agent_id", "metadata", "created_after", "status"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py new file mode 100644 index 00000000..9fa99837 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 34f89d2af0ec + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to archive.""" + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to archive.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 00000000..56fa5340 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d175c6e32ecb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningCancelFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 00000000..db857f7d --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81651291187a + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ResponseTypedDict = TypeAliasType( + "ResponseTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownResponse(BaseModel): + r"""A Response variant the SDK doesn't recognize. Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +Response = Annotated[ + Union[ClassifierFineTuningJob, CompletionFineTuningJob, UnknownResponse], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_RESPONSE_VARIANTS, + unknown_cls=UnknownResponse, + union_name="Response", + ) + ), +] + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + Union[LegacyJobMetadataTypedDict, ResponseTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + Union[LegacyJobMetadata, Response], +) +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py 
b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 00000000..ddd9c189 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d910fd8fe2d6 + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningGetFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningGetFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py new file mode 100644 index 00000000..ec80a158 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -0,0 +1,162 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cf43028824bf + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current job state to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""The page number of the results to be returned.""" + page_size: NotRequired[int] + r"""The number of items to return per page.""" + model: NotRequired[Nullable[str]] + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + created_after: NotRequired[Nullable[datetime]] + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: NotRequired[Nullable[str]] + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: NotRequired[Nullable[str]] + r"""The Weight and Biases run name to filter on. 
When set, the other results are not displayed.""" + suffix: NotRequired[Nullable[str]] + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""The page number of the results to be returned.""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + r"""The number of items to return per page.""" + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + + created_before: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + + status: Annotated[ + OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The current job state to filter on. When set, the other results are not displayed.""" + + wandb_project: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weights and Biases project to filter on. 
When set, the other results are not displayed.""" + + wandb_name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + + suffix: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "model", + "created_after", + "created_before", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) + nullable_fields = set( + [ + "model", + "created_after", + "created_before", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..cd25fa04 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e7ff4a4a4edb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningStartFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningStartFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py new file mode 100644 index 00000000..fd01fe69 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7cc1c80335a9 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to unarchive.""" + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to unarchive.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 00000000..296070b4 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6d9dc624aafd + +from __future__ import annotations +from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, +) +from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, +) +from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_model_request: UpdateModelRequestTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to update.""" + + update_model_request: Annotated[ + UpdateModelRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFineTunedModelTypedDict, ClassifierFineTunedModelTypedDict], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningUpdateFineTunedModelResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + model_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTunedModel, + "completion": CompletionFineTunedModel, +} + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + ClassifierFineTunedModel, + CompletionFineTunedModel, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="model_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + union_name="JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py new file mode 100644 index 00000000..dfababa6 --- /dev/null +++ b/src/mistralai/client/models/jsonschema.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e1fc1d8a434a + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/legacyjobmetadata.py b/src/mistralai/client/models/legacyjobmetadata.py new file mode 100644 index 00000000..57576758 --- /dev/null +++ b/src/mistralai/client/models/legacyjobmetadata.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0330b8930f65 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class LegacyJobMetadataTypedDict(TypedDict): + details: str + expected_duration_seconds: NotRequired[Nullable[int]] + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: NotRequired[Nullable[float]] + r"""The cost of the fine-tuning job.""" + cost_currency: NotRequired[Nullable[str]] + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: NotRequired[Nullable[int]] + r"""The number of tokens consumed by one training step.""" + train_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens in the training dataset.""" + estimated_start_time: NotRequired[Nullable[int]] + deprecated: NotRequired[bool] + epochs: NotRequired[Nullable[float]] + r"""The number of complete passes through the entire training dataset.""" + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + object: Literal["job.metadata"] + + +class LegacyJobMetadata(BaseModel): + details: str + + expected_duration_seconds: OptionalNullable[int] = UNSET + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + + cost: OptionalNullable[float] = UNSET + r"""The cost of the fine-tuning job.""" + + cost_currency: OptionalNullable[str] = UNSET + r"""The currency used for the fine-tuning job cost.""" + + train_tokens_per_step: OptionalNullable[int] = UNSET + r"""The number of tokens consumed by one training step.""" + + train_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens used during the fine-tuning process.""" + + data_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens in the training dataset.""" + + estimated_start_time: OptionalNullable[int] = UNSET + + deprecated: Optional[bool] = True + + epochs: OptionalNullable[float] = UNSET + r"""The number of complete passes through the entire training dataset.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + + object: Annotated[ + Annotated[ + Optional[Literal["job.metadata"]], + AfterValidator(validate_const("job.metadata")), + ], + pydantic.Field(alias="object"), + ] = "job.metadata" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + "training_steps", + "object", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + LegacyJobMetadata.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/libraries_delete_v1op.py b/src/mistralai/client/models/libraries_delete_v1op.py new file mode 100644 index 00000000..893ab53b --- /dev/null +++ b/src/mistralai/client/models/libraries_delete_v1op.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b2e8bbd19baa + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDeleteV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_delete_v1op.py b/src/mistralai/client/models/libraries_documents_delete_v1op.py new file mode 100644 index 00000000..0495832e --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_delete_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 81eb34382a3d + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py new file mode 100644 index 00000000..186baaed --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a7417ebd6040 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py new file mode 100644 index 00000000..ebcf85d7 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d4b7b47913ba + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_status_v1op.py b/src/mistralai/client/models/libraries_documents_get_status_v1op.py new file mode 100644 index 00000000..1f484787 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_status_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f314f73e909c + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetStatusV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py new file mode 100644 index 00000000..e0508d66 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1ca4e0c41321 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetTextContentV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_v1op.py b/src/mistralai/client/models/libraries_documents_get_v1op.py new file mode 100644 index 00000000..857dfbe6 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 26ff35f0c69d + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_list_v1op.py b/src/mistralai/client/models/libraries_documents_list_v1op.py new file mode 100644 index 00000000..da7d793b --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_list_v1op.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 756f26de3cbe + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class LibrariesDocumentsListV1RequestTypedDict(TypedDict): + library_id: str + search: NotRequired[Nullable[str]] + page_size: NotRequired[int] + page: NotRequired[int] + filters_attributes: NotRequired[Nullable[str]] + sort_by: NotRequired[str] + sort_order: NotRequired[str] + + +class LibrariesDocumentsListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + 
page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + filters_attributes: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sort_by: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "created_at" + + sort_order: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "desc" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + ) + nullable_fields = set(["search", "filters_attributes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraries_documents_reprocess_v1op.py b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py new file mode 100644 index 00000000..a2f9ba2a --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: dbbeb02fc336 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsReprocessV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_update_v1op.py b/src/mistralai/client/models/libraries_documents_update_v1op.py new file mode 100644 index 00000000..7ad4231f --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_update_v1op.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 734ba6c19f5f + +from __future__ import annotations +from .updatedocumentrequest import UpdateDocumentRequest, UpdateDocumentRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + update_document_request: UpdateDocumentRequestTypedDict + + +class LibrariesDocumentsUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_document_request: Annotated[ + UpdateDocumentRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py new file mode 100644 index 00000000..388633d1 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 744466971862 + +from __future__ import annotations +from .file import File, FileTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import ( + FieldMetadata, + MultipartFormMetadata, + PathParamMetadata, + RequestMetadata, +) +from typing_extensions import Annotated, TypedDict + + +class DocumentUploadTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class DocumentUpload(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): + library_id: str + request_body: DocumentUploadTypedDict + + +class LibrariesDocumentsUploadV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + request_body: Annotated[ + DocumentUpload, + FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), + ] diff --git a/src/mistralai/client/models/libraries_get_v1op.py b/src/mistralai/client/models/libraries_get_v1op.py new file mode 100644 index 00000000..7a51d605 --- /dev/null +++ b/src/mistralai/client/models/libraries_get_v1op.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d493f39e7ebb + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesGetV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_share_create_v1op.py b/src/mistralai/client/models/libraries_share_create_v1op.py new file mode 100644 index 00000000..00ea7482 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_create_v1op.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: feaacfd46dd3 + +from __future__ import annotations +from .sharingin import SharingIn, SharingInTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareCreateV1RequestTypedDict(TypedDict): + library_id: str + sharing_in: SharingInTypedDict + + +class LibrariesShareCreateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_in: Annotated[ + SharingIn, FieldMetadata(request=RequestMetadata(media_type="application/json")) + ] diff --git a/src/mistralai/client/models/libraries_share_delete_v1op.py b/src/mistralai/client/models/libraries_share_delete_v1op.py new file mode 100644 index 00000000..eca3f86a --- /dev/null +++ b/src/mistralai/client/models/libraries_share_delete_v1op.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7f3a679ca384 + +from __future__ import annotations +from .sharingdelete import SharingDelete, SharingDeleteTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareDeleteV1RequestTypedDict(TypedDict): + library_id: str + sharing_delete: SharingDeleteTypedDict + + +class LibrariesShareDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_delete: Annotated[ + SharingDelete, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraries_share_list_v1op.py b/src/mistralai/client/models/libraries_share_list_v1op.py new file mode 100644 index 00000000..895a2590 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_list_v1op.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8f0af379bf1c + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareListV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesShareListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_update_v1op.py b/src/mistralai/client/models/libraries_update_v1op.py new file mode 100644 index 00000000..54b0ab70 --- /dev/null +++ b/src/mistralai/client/models/libraries_update_v1op.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 92c8d4132252 + +from __future__ import annotations +from .updatelibraryrequest import UpdateLibraryRequest, UpdateLibraryRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesUpdateV1RequestTypedDict(TypedDict): + library_id: str + update_library_request: UpdateLibraryRequestTypedDict + + +class LibrariesUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_library_request: Annotated[ + UpdateLibraryRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/library.py b/src/mistralai/client/models/library.py new file mode 100644 index 00000000..1953b6fb --- /dev/null +++ b/src/mistralai/client/models/library.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 028a34b08f9c + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryTypedDict(TypedDict): + id: str + name: str + created_at: datetime + updated_at: datetime + owner_id: Nullable[str] + owner_type: str + total_size: int + nb_documents: int + chunk_size: Nullable[int] + emoji: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + generated_description: NotRequired[Nullable[str]] + explicit_user_members_count: NotRequired[Nullable[int]] + explicit_workspace_members_count: NotRequired[Nullable[int]] + org_sharing_role: NotRequired[Nullable[str]] + generated_name: NotRequired[Nullable[str]] + r"""Generated Name""" + + +class Library(BaseModel): + id: str + + name: str + + created_at: datetime + + updated_at: datetime + + owner_id: Nullable[str] + + owner_type: str + + total_size: int + + nb_documents: int + + chunk_size: Nullable[int] + + emoji: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + generated_description: OptionalNullable[str] = UNSET + + explicit_user_members_count: OptionalNullable[int] = UNSET + + explicit_workspace_members_count: OptionalNullable[int] = UNSET + + org_sharing_role: OptionalNullable[str] = UNSET + + generated_name: OptionalNullable[str] = UNSET + r"""Generated Name""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + ) + nullable_fields = set( + [ + "owner_id", + "chunk_size", + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + 
"org_sharing_role", + "generated_name", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listbatchjobsresponse.py b/src/mistralai/client/models/listbatchjobsresponse.py new file mode 100644 index 00000000..35a348a1 --- /dev/null +++ b/src/mistralai/client/models/listbatchjobsresponse.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 99d94c86a871 + +from __future__ import annotations +from .batchjob import BatchJob, BatchJobTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ListBatchJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobTypedDict]] + object: Literal["list"] + + +class ListBatchJobsResponse(BaseModel): + total: int + + data: Optional[List[BatchJob]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or 
k not in optional_fields: + m[k] = val + + return m + + +try: + ListBatchJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listdocumentsresponse.py b/src/mistralai/client/models/listdocumentsresponse.py new file mode 100644 index 00000000..c48b8c05 --- /dev/null +++ b/src/mistralai/client/models/listdocumentsresponse.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f593d8e66833 + +from __future__ import annotations +from .document import Document, DocumentTypedDict +from .paginationinfo import PaginationInfo, PaginationInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListDocumentsResponseTypedDict(TypedDict): + pagination: PaginationInfoTypedDict + data: List[DocumentTypedDict] + + +class ListDocumentsResponse(BaseModel): + pagination: PaginationInfo + + data: List[Document] diff --git a/src/mistralai/client/models/listfilesresponse.py b/src/mistralai/client/models/listfilesresponse.py new file mode 100644 index 00000000..10a60126 --- /dev/null +++ b/src/mistralai/client/models/listfilesresponse.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 85d6d24c1a19 + +from __future__ import annotations +from .fileschema import FileSchema, FileSchemaTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ListFilesResponseTypedDict(TypedDict): + data: List[FileSchemaTypedDict] + object: str + total: NotRequired[Nullable[int]] + + +class ListFilesResponse(BaseModel): + data: List[FileSchema] + + object: str + + total: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["total"]) + nullable_fields = set(["total"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listfinetuningjobsresponse.py b/src/mistralai/client/models/listfinetuningjobsresponse.py new file mode 100644 index 00000000..1e434c59 --- /dev/null +++ b/src/mistralai/client/models/listfinetuningjobsresponse.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 118e05dbfbbd + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ListFineTuningJobsResponseDataTypedDict = TypeAliasType( + "ListFineTuningJobsResponseDataTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownListFineTuningJobsResponseData(BaseModel): + r"""A ListFineTuningJobsResponseData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +ListFineTuningJobsResponseData = Annotated[ + Union[ + ClassifierFineTuningJob, + CompletionFineTuningJob, + UnknownListFineTuningJobsResponseData, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS, + unknown_cls=UnknownListFineTuningJobsResponseData, + union_name="ListFineTuningJobsResponseData", + ) + ), +] + + +class ListFineTuningJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[ListFineTuningJobsResponseDataTypedDict]] + object: Literal["list"] + + +class ListFineTuningJobsResponse(BaseModel): + total: int + + data: Optional[List[ListFineTuningJobsResponseData]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ListFineTuningJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listlibrariesresponse.py b/src/mistralai/client/models/listlibrariesresponse.py new file mode 100644 index 00000000..337fe105 --- /dev/null +++ b/src/mistralai/client/models/listlibrariesresponse.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: df556a618365 + +from __future__ import annotations +from .library import Library, LibraryTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListLibrariesResponseTypedDict(TypedDict): + data: List[LibraryTypedDict] + + +class ListLibrariesResponse(BaseModel): + data: List[Library] diff --git a/src/mistralai/client/models/listsharingout.py b/src/mistralai/client/models/listsharingout.py new file mode 100644 index 00000000..443ad0d6 --- /dev/null +++ b/src/mistralai/client/models/listsharingout.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ee708a7ccdad + +from __future__ import annotations +from .sharingout import SharingOut, SharingOutTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListSharingOutTypedDict(TypedDict): + data: List[SharingOutTypedDict] + + +class ListSharingOut(BaseModel): + data: List[SharingOut] diff --git a/src/mistralai/client/models/messageentries.py b/src/mistralai/client/models/messageentries.py new file mode 100644 index 00000000..a95098e0 --- /dev/null +++ b/src/mistralai/client/models/messageentries.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e13f9009902b + +from __future__ import annotations +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageEntriesTypedDict = TypeAliasType( + "MessageEntriesTypedDict", + Union[MessageInputEntryTypedDict, MessageOutputEntryTypedDict], +) + + +MessageEntries = TypeAliasType( + "MessageEntries", Union[MessageInputEntry, MessageOutputEntry] +) diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py new file mode 100644 index 00000000..1e04ce24 --- /dev/null +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 01025c12866a + +from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageInputContentChunksTypedDict = TypeAliasType( + "MessageInputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ConversationThinkChunkTypedDict, + ToolFileChunkTypedDict, + ], +) + + +MessageInputContentChunks = TypeAliasType( + "MessageInputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ], +) diff --git a/src/mistralai/client/models/messageinputentry.py 
b/src/mistralai/client/models/messageinputentry.py new file mode 100644 index 00000000..c948a13e --- /dev/null +++ b/src/mistralai/client/models/messageinputentry.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c0a4b5179095 + +from __future__ import annotations +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +Role = Union[ + Literal[ + "assistant", + "user", + ], + UnrecognizedStr, +] + + +MessageInputEntryContentTypedDict = TypeAliasType( + "MessageInputEntryContentTypedDict", + Union[str, List[MessageInputContentChunksTypedDict]], +) + + +MessageInputEntryContent = TypeAliasType( + "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] +) + + +class MessageInputEntryTypedDict(TypedDict): + r"""Representation of an input message inside the conversation.""" + + role: Role + content: MessageInputEntryContentTypedDict + object: Literal["entry"] + type: Literal["message.input"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + prefix: NotRequired[bool] + + +class MessageInputEntry(BaseModel): + r"""Representation of an input message inside the conversation.""" + + role: Role + + content: MessageInputEntryContent + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + 
type: Annotated[ + Annotated[ + Optional[Literal["message.input"]], + AfterValidator(validate_const("message.input")), + ], + pydantic.Field(alias="type"), + ] = "message.input" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + prefix: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["object", "type", "created_at", "completed_at", "id", "prefix"] + ) + nullable_fields = set(["completed_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + MessageInputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py new file mode 100644 index 00000000..bf455d17 --- /dev/null +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2ed248515035 + +from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageOutputContentChunksTypedDict = TypeAliasType( + "MessageOutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ConversationThinkChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +MessageOutputContentChunks = TypeAliasType( + "MessageOutputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ToolReferenceChunk, + ], +) diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py new file mode 100644 index 00000000..6a9c52ed --- /dev/null +++ b/src/mistralai/client/models/messageoutputentry.py @@ -0,0 +1,121 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a07577d2268d + +from __future__ import annotations +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +MessageOutputEntryContentTypedDict = TypeAliasType( + "MessageOutputEntryContentTypedDict", + Union[str, List[MessageOutputContentChunksTypedDict]], +) + + +MessageOutputEntryContent = TypeAliasType( + "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] +) + + +class MessageOutputEntryTypedDict(TypedDict): + content: MessageOutputEntryContentTypedDict + object: Literal["entry"] + type: Literal["message.output"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + id: NotRequired[str] + role: Literal["assistant"] + + +class MessageOutputEntry(BaseModel): + content: MessageOutputEntryContent + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["message.output"]], + AfterValidator(validate_const("message.output")), + ], + pydantic.Field(alias="type"), + ] = "message.output" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + id: Optional[str] = None + + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], 
AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "role", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + MessageOutputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py new file mode 100644 index 00000000..d765f4fd --- /dev/null +++ b/src/mistralai/client/models/messageoutputevent.py @@ -0,0 +1,105 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a2bbf63615c6 + +from __future__ import annotations +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +MessageOutputEventContentTypedDict = TypeAliasType( + "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] +) + + +MessageOutputEventContent = TypeAliasType( + "MessageOutputEventContent", Union[str, OutputContentChunks] +) + + +class MessageOutputEventTypedDict(TypedDict): + id: str + content: MessageOutputEventContentTypedDict + type: Literal["message.output.delta"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + content_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + role: Literal["assistant"] + + +class MessageOutputEvent(BaseModel): + id: str + + content: MessageOutputEventContent + + type: Annotated[ + Annotated[ + Literal["message.output.delta"], + AfterValidator(validate_const("message.output.delta")), + ], + pydantic.Field(alias="type"), + ] = "message.output.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + content_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["created_at", "output_index", 
"content_index", "model", "agent_id", "role"] + ) + nullable_fields = set(["model", "agent_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + MessageOutputEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/metric.py b/src/mistralai/client/models/metric.py new file mode 100644 index 00000000..1413f589 --- /dev/null +++ b/src/mistralai/client/models/metric.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c6a65acdd1a2 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class MetricTypedDict(TypedDict): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: NotRequired[Nullable[float]] + valid_loss: NotRequired[Nullable[float]] + valid_mean_token_accuracy: NotRequired[Nullable[float]] + + +class Metric(BaseModel): + r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: OptionalNullable[float] = UNSET + + valid_loss: OptionalNullable[float] = UNSET + + valid_mean_token_accuracy: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) + nullable_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/mistralpromptmode.py b/src/mistralai/client/models/mistralpromptmode.py new file mode 100644 index 00000000..9b91323e --- /dev/null +++ b/src/mistralai/client/models/mistralpromptmode.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 95abc4ec799a + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+""" diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py new file mode 100644 index 00000000..d9293ccc --- /dev/null +++ b/src/mistralai/client/models/modelcapabilities.py @@ -0,0 +1,72 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 64d8a422ea29 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + function_calling: NotRequired[bool] + completion_fim: NotRequired[bool] + fine_tuning: NotRequired[bool] + vision: NotRequired[bool] + ocr: NotRequired[bool] + classification: NotRequired[bool] + moderation: NotRequired[bool] + audio: NotRequired[bool] + audio_transcription: NotRequired[bool] + + +class ModelCapabilities(BaseModel): + completion_chat: Optional[bool] = False + + function_calling: Optional[bool] = False + + completion_fim: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + vision: Optional[bool] = False + + ocr: Optional[bool] = False + + classification: Optional[bool] = False + + moderation: Optional[bool] = False + + audio: Optional[bool] = False + + audio_transcription: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "completion_chat", + "function_calling", + "completion_fim", + "fine_tuning", + "vision", + "ocr", + "classification", + "moderation", + "audio", + "audio_transcription", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git 
a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py new file mode 100644 index 00000000..bb33d2e0 --- /dev/null +++ b/src/mistralai/client/models/modelconversation.py @@ -0,0 +1,179 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fea0a651f888 + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from functools import partial +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelConversationToolTypedDict = TypeAliasType( + "ModelConversationToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +class UnknownModelConversationTool(BaseModel): + r"""A ModelConversationTool variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_CONVERSATION_TOOL_VARIANTS: dict[str, Any] = { + "code_interpreter": CodeInterpreterTool, + "document_library": DocumentLibraryTool, + "function": FunctionTool, + "image_generation": ImageGenerationTool, + "web_search": WebSearchTool, + "web_search_premium": WebSearchPremiumTool, +} + + +ModelConversationTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + UnknownModelConversationTool, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_CONVERSATION_TOOL_VARIANTS, + unknown_cls=UnknownModelConversationTool, + union_name="ModelConversationTool", + ) + ), +] + + +class ModelConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + model: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[ModelConversationToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + object: Literal["conversation"] + + +class ModelConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + model: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[ModelConversationTool]] = None + r"""List of tools which are 
available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + object: Annotated[ + Annotated[ + Optional[Literal["conversation"]], + AfterValidator(validate_const("conversation")), + ], + pydantic.Field(alias="object"), + ] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "object", + ] + ) + nullable_fields = set(["instructions", "name", "description", "metadata"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ModelConversation.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py new file mode 100644 index 00000000..5fd835f2 --- /dev/null +++ b/src/mistralai/client/models/modellist.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 00693c7eec60 + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import BeforeValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelListDataTypedDict = TypeAliasType( + "ModelListDataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] +) + + +class UnknownModelListData(BaseModel): + r"""A ModelListData variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_LIST_DATA_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + +ModelListData = Annotated[ + Union[BaseModelCard, FTModelCard, UnknownModelListData], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_LIST_DATA_VARIANTS, + unknown_cls=UnknownModelListData, + union_name="ModelListData", + ) + ), +] + + +class ModelListTypedDict(TypedDict): + object: NotRequired[str] + data: NotRequired[List[ModelListDataTypedDict]] + + +class ModelList(BaseModel): + object: Optional[str] = "list" + + data: Optional[List[ModelListData]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "data"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git 
a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py new file mode 100644 index 00000000..e7ccd8f6 --- /dev/null +++ b/src/mistralai/client/models/moderationobject.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 132faad0549a + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ModerationObjectTypedDict(TypedDict): + categories: NotRequired[Dict[str, bool]] + r"""Moderation result thresholds""" + category_scores: NotRequired[Dict[str, float]] + r"""Moderation result""" + + +class ModerationObject(BaseModel): + categories: Optional[Dict[str, bool]] = None + r"""Moderation result thresholds""" + + category_scores: Optional[Dict[str, float]] = None + r"""Moderation result""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["categories", "category_scores"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/moderationresponse.py b/src/mistralai/client/models/moderationresponse.py new file mode 100644 index 00000000..a8a8ec3d --- /dev/null +++ b/src/mistralai/client/models/moderationresponse.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 06bab279cb31 + +from __future__ import annotations +from .moderationobject import ModerationObject, ModerationObjectTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ModerationResponseTypedDict(TypedDict): + id: str + model: str + results: List[ModerationObjectTypedDict] + + +class ModerationResponse(BaseModel): + id: str + + model: str + + results: List[ModerationObject] diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py new file mode 100644 index 00000000..365f062b --- /dev/null +++ b/src/mistralai/client/models/ocrimageobject.py @@ -0,0 +1,87 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 685faeb41a80 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + + 
top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["image_base64", "image_annotation"]) + nullable_fields = set( + [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrpagedimensions.py b/src/mistralai/client/models/ocrpagedimensions.py new file mode 100644 index 00000000..847205c6 --- /dev/null +++ b/src/mistralai/client/models/ocrpagedimensions.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 02f763afbc9f + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py new file mode 100644 index 00000000..ffc7b3b6 --- /dev/null +++ b/src/mistralai/client/models/ocrpageobject.py @@ -0,0 +1,87 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 07a099f89487 + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + r"""List of all 
hyperlinks in the page""" + header: NotRequired[Nullable[str]] + r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + r"""Footer of the page""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tables", "hyperlinks", "header", "footer"]) + nullable_fields = set(["header", "footer", "dimensions"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py new file mode 100644 index 00000000..4ad337ce --- /dev/null +++ b/src/mistralai/client/models/ocrrequest.py @@ -0,0 +1,148 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 36f204c64074 + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentUnionTypedDict = TypeAliasType( + "DocumentUnionTypedDict", + Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], +) +r"""Document to run OCR on""" + + +DocumentUnion = TypeAliasType( + "DocumentUnion", Union[FileChunk, ImageURLChunk, DocumentURLChunk] +) +r"""Document to run OCR on""" + + +TableFormat = Literal[ + "markdown", + "html", +] + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentUnionTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: DocumentUnion + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" + + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + ) + nullable_fields = set( + [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py new file mode 100644 index 00000000..e63eed98 --- /dev/null +++ b/src/mistralai/client/models/ocrresponse.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2fdfc881ca56 + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py new file mode 100644 index 00000000..66bb050f --- /dev/null +++ b/src/mistralai/client/models/ocrtableobject.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d74dd0d2ddac + +from __future__ import annotations +from mistralai.client.types import BaseModel, UnrecognizedStr +import pydantic +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict + + +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, +] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py new file mode 100644 index 00000000..2ec1322b --- /dev/null +++ b/src/mistralai/client/models/ocrusageinfo.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 272b7e1785d5 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py new file mode 100644 index 00000000..fab7907b --- /dev/null +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9ad9741f4975 + +from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +OutputContentChunksTypedDict = TypeAliasType( + "OutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ConversationThinkChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +OutputContentChunks = TypeAliasType( + "OutputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ToolReferenceChunk, + ], +) diff --git a/src/mistralai/client/models/paginationinfo.py b/src/mistralai/client/models/paginationinfo.py new file mode 100644 index 00000000..2b9dab62 --- /dev/null +++ b/src/mistralai/client/models/paginationinfo.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 48851e82d67e + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class PaginationInfoTypedDict(TypedDict): + total_items: int + total_pages: int + current_page: int + page_size: int + has_more: bool + + +class PaginationInfo(BaseModel): + total_items: int + + total_pages: int + + current_page: int + + page_size: int + + has_more: bool diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py new file mode 100644 index 00000000..0c6f4182 --- /dev/null +++ b/src/mistralai/client/models/prediction.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1cc842a069a5 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val 
= serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py new file mode 100644 index 00000000..ed2a4f22 --- /dev/null +++ b/src/mistralai/client/models/processingstatusout.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3df842c4140f + +from __future__ import annotations +from .processstatus import ProcessStatus +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ProcessingStatusOutTypedDict(TypedDict): + document_id: str + process_status: ProcessStatus + processing_status: str + + +class ProcessingStatusOut(BaseModel): + document_id: str + + process_status: ProcessStatus + + processing_status: str diff --git a/src/mistralai/client/models/processstatus.py b/src/mistralai/client/models/processstatus.py new file mode 100644 index 00000000..15bdce20 --- /dev/null +++ b/src/mistralai/client/models/processstatus.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0205512146e6 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ProcessStatus = Union[ + Literal[ + "self_managed", + "missing_content", + "noop", + "done", + "todo", + "in_progress", + "error", + "waiting_for_capacity", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py new file mode 100644 index 00000000..c661e461 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionerror.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8c2267378f48 + +from __future__ import annotations +from .realtimetranscriptionerrordetail import ( + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionErrorTypedDict(TypedDict): + error: RealtimeTranscriptionErrorDetailTypedDict + type: Literal["error"] + + +class RealtimeTranscriptionError(BaseModel): + error: RealtimeTranscriptionErrorDetail + + type: Annotated[ + Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], + pydantic.Field(alias="type"), + ] = "error" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: 
+ RealtimeTranscriptionError.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py new file mode 100644 index 00000000..cec1f6ea --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5bd25cdf9c7a + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +RealtimeTranscriptionErrorDetailMessageTypedDict = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessageTypedDict", Union[str, Dict[str, Any]] +) +r"""Human-readable error message.""" + + +RealtimeTranscriptionErrorDetailMessage = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessage", Union[str, Dict[str, Any]] +) +r"""Human-readable error message.""" + + +class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): + message: RealtimeTranscriptionErrorDetailMessageTypedDict + r"""Human-readable error message.""" + code: int + r"""Internal error code for debugging.""" + + +class RealtimeTranscriptionErrorDetail(BaseModel): + message: RealtimeTranscriptionErrorDetailMessage + r"""Human-readable error message.""" + + code: int + r"""Internal error code for debugging.""" diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py new file mode 100644 index 00000000..8156a270 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8b03cde6e115 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioAppendTypedDict(TypedDict): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes.""" + type: Literal["input_audio.append"] + + +class RealtimeTranscriptionInputAudioAppend(BaseModel): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes.""" + + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.append"]], + AfterValidator(validate_const("input_audio.append")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.append" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioAppend.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py new file mode 100644 index 00000000..473eedb7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c187ba1b551d + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioEndTypedDict(TypedDict): + type: Literal["input_audio.end"] + + +class RealtimeTranscriptionInputAudioEnd(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.end"]], + AfterValidator(validate_const("input_audio.end")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.end" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioEnd.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py new file mode 100644 index 00000000..553d14c7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b27b600c310e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioFlushTypedDict(TypedDict): + type: Literal["input_audio.flush"] + + +class RealtimeTranscriptionInputAudioFlush(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.flush"]], + AfterValidator(validate_const("input_audio.flush")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.flush" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioFlush.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py new file mode 100644 index 00000000..a74a457b --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsession.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 02517fa5411a + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class RealtimeTranscriptionSessionTypedDict(TypedDict): + request_id: str + model: str + audio_format: AudioFormatTypedDict + target_streaming_delay_ms: NotRequired[Nullable[int]] + + +class RealtimeTranscriptionSession(BaseModel): + request_id: str + + model: str + + audio_format: AudioFormat + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["target_streaming_delay_ms"]) + nullable_fields = set(["target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py new file mode 100644 index 00000000..bb96875a --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4e3731f63a3c + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.created"] + + +class RealtimeTranscriptionSessionCreated(BaseModel): + session: RealtimeTranscriptionSession + + type: Annotated[ + Annotated[ + Optional[Literal["session.created"]], + AfterValidator(validate_const("session.created")), + ], + pydantic.Field(alias="type"), + ] = "session.created" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionCreated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py new file mode 100644 index 00000000..fea5db4a --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 686dc4f2450f + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.updated"] + + +class RealtimeTranscriptionSessionUpdated(BaseModel): + session: RealtimeTranscriptionSession + + type: Annotated[ + Annotated[ + Optional[Literal["session.updated"]], + AfterValidator(validate_const("session.updated")), + ], + pydantic.Field(alias="type"), + ] = "session.updated" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionUpdated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py new file mode 100644 index 00000000..07ad59a4 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4e1b3fd7c5a3 + +from __future__ import annotations +from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + RealtimeTranscriptionSessionUpdatePayloadTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdateMessageTypedDict(TypedDict): + session: RealtimeTranscriptionSessionUpdatePayloadTypedDict + type: Literal["session.update"] + + +class RealtimeTranscriptionSessionUpdateMessage(BaseModel): + session: RealtimeTranscriptionSessionUpdatePayload + + type: Annotated[ + Annotated[ + Optional[Literal["session.update"]], + AfterValidator(validate_const("session.update")), + ], + pydantic.Field(alias="type"), + ] = "session.update" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionUpdateMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py new file mode 100644 index 00000000..a89441e9 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7033fdb33ad4 + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class RealtimeTranscriptionSessionUpdatePayloadTypedDict(TypedDict): + audio_format: NotRequired[Nullable[AudioFormatTypedDict]] + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + target_streaming_delay_ms: NotRequired[Nullable[int]] + r"""Set before sending audio. Streaming delay updates are rejected after audio starts.""" + + +class RealtimeTranscriptionSessionUpdatePayload(BaseModel): + audio_format: OptionalNullable[AudioFormat] = UNSET + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + r"""Set before sending audio. 
Streaming delay updates are rejected after audio starts.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["audio_format", "target_streaming_delay_ms"]) + nullable_fields = set(["audio_format", "target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py new file mode 100644 index 00000000..e0bbae4e --- /dev/null +++ b/src/mistralai/client/models/referencechunk.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 921acd3a224a + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/requestsource.py b/src/mistralai/client/models/requestsource.py new file mode 100644 index 00000000..fc4433cb --- /dev/null +++ b/src/mistralai/client/models/requestsource.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3f2774d9e609 + +from __future__ import annotations +from typing import Literal + + +RequestSource = Literal[ + "api", + "playground", + "agent_builder_v1", +] diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py new file mode 100644 index 00000000..be38fba8 --- /dev/null +++ b/src/mistralai/client/models/responsedoneevent.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf8a686bf82c + +from __future__ import annotations +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ResponseDoneEventTypedDict(TypedDict): + usage: ConversationUsageInfoTypedDict + type: Literal["conversation.response.done"] + created_at: NotRequired[datetime] + + +class ResponseDoneEvent(BaseModel): + usage: ConversationUsageInfo + + type: Annotated[ + Annotated[ + Literal["conversation.response.done"], + AfterValidator(validate_const("conversation.response.done")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.done" + + created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseDoneEvent.model_rebuild() +except NameError: + pass diff 
--git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py new file mode 100644 index 00000000..fa4d0d01 --- /dev/null +++ b/src/mistralai/client/models/responseerrorevent.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b286d74e8724 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ResponseErrorEventTypedDict(TypedDict): + message: str + code: int + type: Literal["conversation.response.error"] + created_at: NotRequired[datetime] + + +class ResponseErrorEvent(BaseModel): + message: str + + code: int + + type: Annotated[ + Annotated[ + Literal["conversation.response.error"], + AfterValidator(validate_const("conversation.response.error")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.error" + + created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseErrorEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py new file mode 100644 index 00000000..b2971412 --- /dev/null +++ b/src/mistralai/client/models/responseformat.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6ab8bc8d22c0 + +from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .responseformats import ResponseFormats +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: NotRequired[ResponseFormats] + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] + + +class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: Optional[ResponseFormats] = None + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py new file mode 100644 index 00000000..21345778 --- /dev/null +++ b/src/mistralai/client/models/responseformats.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c4462a05fb08 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ResponseFormats = Union[ + Literal[ + "text", + "json_object", + "json_schema", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py new file mode 100644 index 00000000..84abfcd9 --- /dev/null +++ b/src/mistralai/client/models/responsestartedevent.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 24f54ee8b0f2 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ResponseStartedEventTypedDict(TypedDict): + conversation_id: str + type: Literal["conversation.response.started"] + created_at: NotRequired[datetime] + + +class ResponseStartedEvent(BaseModel): + conversation_id: str + + type: Annotated[ + Annotated[ + Literal["conversation.response.started"], + AfterValidator(validate_const("conversation.response.started")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.started" + + created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py new file mode 100644 index 00000000..cd5955c1 --- /dev/null +++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6fefa90ca351 + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to retrieve.""" + + +class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to retrieve.""" + + +ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", + Union[BaseModelCardTypedDict, FTModelCardTypedDict], +) +r"""Successful Response""" + + +class UnknownResponseRetrieveModelV1ModelsModelIDGet(BaseModel): + r"""A ResponseRetrieveModelV1ModelsModelIDGet variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + +ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[BaseModelCard, FTModelCard, UnknownResponseRetrieveModelV1ModelsModelIDGet], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS, + unknown_cls=UnknownResponseRetrieveModelV1ModelsModelIDGet, + union_name="ResponseRetrieveModelV1ModelsModelIDGet", + ) + ), +] +r"""Successful Response""" diff --git a/src/mistralai/client/models/sampletype.py b/src/mistralai/client/models/sampletype.py new file mode 100644 index 00000000..dfec7cce --- /dev/null +++ b/src/mistralai/client/models/sampletype.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a9309422fed7 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +SampleType = Union[ + Literal[ + "pretrain", + "instruct", + "batch_request", + "batch_result", + "batch_error", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py new file mode 100644 index 00000000..f3b3423e --- /dev/null +++ b/src/mistralai/client/models/security.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c2ca0e2a36b7 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, SecurityMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class SecurityTypedDict(TypedDict): + api_key: NotRequired[str] + + +class Security(BaseModel): + api_key: Annotated[ + Optional[str], + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ApiKey"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/shareenum.py b/src/mistralai/client/models/shareenum.py new file mode 100644 index 00000000..08ffeb7e --- /dev/null +++ b/src/mistralai/client/models/shareenum.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0e2a7a16bf8 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ShareEnum = Union[ + Literal[ + "Viewer", + "Editor", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py new file mode 100644 index 00000000..33ccd7e7 --- /dev/null +++ b/src/mistralai/client/models/sharingdelete.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f5ecce372e06 + +from __future__ import annotations +from .entitytype import EntityType +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingDeleteTypedDict(TypedDict): + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] + + +class SharingDelete(BaseModel): + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingin.py new file mode 100644 index 00000000..7c1a52b0 --- /dev/null +++ b/src/mistralai/client/models/sharingin.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e953dda09c02 + +from __future__ import annotations +from .entitytype import EntityType +from .shareenum import ShareEnum +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingInTypedDict(TypedDict): + level: ShareEnum + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] + + +class SharingIn(BaseModel): + level: ShareEnum + + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py new file mode 100644 index 00000000..ab3679a4 --- /dev/null +++ b/src/mistralai/client/models/sharingout.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0b8804effb5c + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingOutTypedDict(TypedDict): + library_id: str + org_id: str + role: str + share_with_type: str + share_with_uuid: Nullable[str] + user_id: NotRequired[Nullable[str]] + + +class SharingOut(BaseModel): + library_id: str + + org_id: str + + role: str + + share_with_type: str + + share_with_uuid: Nullable[str] + + user_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["user_id"]) + nullable_fields = set(["user_id", "share_with_uuid"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/source.py b/src/mistralai/client/models/source.py new file mode 100644 index 00000000..fcea403c --- /dev/null +++ b/src/mistralai/client/models/source.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fcee60a4ea0d + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +Source = Union[ + Literal[ + "upload", + "repository", + "mistral", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py new file mode 100644 index 00000000..0add960b --- /dev/null +++ b/src/mistralai/client/models/ssetypes.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1733e4765106 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +SSETypes = Union[ + Literal[ + "conversation.response.started", + "conversation.response.done", + "conversation.response.error", + "message.output.delta", + "tool.execution.started", + "tool.execution.delta", + "tool.execution.done", + "agent.handoff.started", + "agent.handoff.done", + "function.call.delta", + ], + UnrecognizedStr, +] +r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py new file mode 100644 index 00000000..2602cd2d --- /dev/null +++ b/src/mistralai/client/models/systemmessage.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 500ef6e85ba1 + +from __future__ import annotations +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], +) + + +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] +) + + +class SystemMessageTypedDict(TypedDict): + content: SystemMessageContentTypedDict + role: Literal["system"] + + +class SystemMessage(BaseModel): + content: SystemMessageContent + + role: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/systemmessagecontentchunks.py b/src/mistralai/client/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..d480a219 --- /dev/null +++ b/src/mistralai/client/models/systemmessagecontentchunks.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 297e8905d5af + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/src/mistralai/client/models/textchunk.py b/src/mistralai/client/models/textchunk.py new file mode 100644 index 00000000..ac9f3137 --- /dev/null +++ b/src/mistralai/client/models/textchunk.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9c96fb86a9ab + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + type: Annotated[ + Annotated[Optional[Literal["text"]], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py new file mode 100644 index 00000000..5995e601 --- /dev/null +++ b/src/mistralai/client/models/thinkchunk.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 294bfce193a4 + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ThinkChunkThinkingTypedDict = TypeAliasType( + "ThinkChunkThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +ThinkChunkThinking = TypeAliasType( + "ThinkChunkThinking", Union[ReferenceChunk, TextChunk] +) + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkChunkThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + +class ThinkChunk(BaseModel): + thinking: List[ThinkChunkThinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/timestampgranularity.py b/src/mistralai/client/models/timestampgranularity.py new file mode 100644 index 00000000..8d377375 --- /dev/null +++ b/src/mistralai/client/models/timestampgranularity.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 68ddf8d702ea + +from __future__ import annotations +from typing import Literal + + +TimestampGranularity = Literal[ + "segment", + "word", +] diff --git a/src/mistralai/client/models/tool.py b/src/mistralai/client/models/tool.py new file mode 100644 index 00000000..2b9965e5 --- /dev/null +++ b/src/mistralai/client/models/tool.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 48b4f6f50fe9 + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from .tooltypes import ToolTypes +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[ToolTypes] + + +class Tool(BaseModel): + function: Function + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcall.py b/src/mistralai/client/models/toolcall.py new file mode 100644 index 00000000..181cec33 --- /dev/null +++ b/src/mistralai/client/models/toolcall.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fb34a1a3f3c2 + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from .tooltypes import ToolTypes +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + type: NotRequired[ToolTypes] + index: NotRequired[int] + + +class ToolCall(BaseModel): + function: FunctionCall + + id: Optional[str] = "null" + + type: Optional[ToolTypes] = None + + index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcallconfirmation.py b/src/mistralai/client/models/toolcallconfirmation.py new file mode 100644 index 00000000..fd6eca50 --- /dev/null +++ b/src/mistralai/client/models/toolcallconfirmation.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f2e953cfb4fe + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal +from typing_extensions import TypedDict + + +Confirmation = Literal[ + "allow", + "deny", +] + + +class ToolCallConfirmationTypedDict(TypedDict): + tool_call_id: str + confirmation: Confirmation + + +class ToolCallConfirmation(BaseModel): + tool_call_id: str + + confirmation: Confirmation diff --git a/src/mistralai/client/models/toolchoice.py b/src/mistralai/client/models/toolchoice.py new file mode 100644 index 00000000..cb787df1 --- /dev/null +++ b/src/mistralai/client/models/toolchoice.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 14f7e4cc35b6 + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff 
--git a/src/mistralai/client/models/toolchoiceenum.py b/src/mistralai/client/models/toolchoiceenum.py new file mode 100644 index 00000000..d66c3d07 --- /dev/null +++ b/src/mistralai/client/models/toolchoiceenum.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c7798801f860 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolChoiceEnum = Union[ + Literal[ + "auto", + "none", + "any", + "required", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/toolconfiguration.py b/src/mistralai/client/models/toolconfiguration.py new file mode 100644 index 00000000..b903c8b6 --- /dev/null +++ b/src/mistralai/client/models/toolconfiguration.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: faec24b75066 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ToolConfigurationTypedDict(TypedDict): + exclude: NotRequired[Nullable[List[str]]] + include: NotRequired[Nullable[List[str]]] + requires_confirmation: NotRequired[Nullable[List[str]]] + + +class ToolConfiguration(BaseModel): + exclude: OptionalNullable[List[str]] = UNSET + + include: OptionalNullable[List[str]] = UNSET + + requires_confirmation: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["exclude", "include", "requires_confirmation"]) + nullable_fields = set(["exclude", "include", "requires_confirmation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + 
val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py new file mode 100644 index 00000000..5a977ca6 --- /dev/null +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: df8f17cf3e07 + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionDeltaEventNameTypedDict = TypeAliasType( + "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDeltaEventName = TypeAliasType( + "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionDeltaEventTypedDict(TypedDict): + id: str + name: ToolExecutionDeltaEventNameTypedDict + arguments: str + type: Literal["tool.execution.delta"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class ToolExecutionDeltaEvent(BaseModel): + id: str + + name: ToolExecutionDeltaEventName + + arguments: str + + type: Annotated[ + Annotated[ + Literal["tool.execution.delta"], + AfterValidator(validate_const("tool.execution.delta")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.delta" 
+ + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDeltaEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py new file mode 100644 index 00000000..1c9b0ec9 --- /dev/null +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 514fdee7d99f + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionDoneEventNameTypedDict = TypeAliasType( + "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDoneEventName = TypeAliasType( + "ToolExecutionDoneEventName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionDoneEventTypedDict(TypedDict): + id: str + name: ToolExecutionDoneEventNameTypedDict + type: Literal["tool.execution.done"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionDoneEvent(BaseModel): + id: str + + name: 
ToolExecutionDoneEventName + + type: Annotated[ + Annotated[ + Literal["tool.execution.done"], + AfterValidator(validate_const("tool.execution.done")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "info"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py new file mode 100644 index 00000000..0d6f2a13 --- /dev/null +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -0,0 +1,115 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 76db69eebe41 + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionEntryNameTypedDict = TypeAliasType( + "ToolExecutionEntryNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionEntryName = TypeAliasType( + "ToolExecutionEntryName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionEntryTypedDict(TypedDict): + name: ToolExecutionEntryNameTypedDict + arguments: str + object: Literal["entry"] + type: Literal["tool.execution"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + id: NotRequired[str] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionEntry(BaseModel): + name: ToolExecutionEntryName + + arguments: str + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["tool.execution"]], + AfterValidator(validate_const("tool.execution")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + id: Optional[str] = None + + info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + 
"type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "info", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolExecutionEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py new file mode 100644 index 00000000..21e5bfa8 --- /dev/null +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 40fadb8e49a1 + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionStartedEventNameTypedDict = TypeAliasType( + "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionStartedEventName = TypeAliasType( + "ToolExecutionStartedEventName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionStartedEventTypedDict(TypedDict): + id: str + name: ToolExecutionStartedEventNameTypedDict + arguments: str + type: Literal["tool.execution.started"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + + +class ToolExecutionStartedEvent(BaseModel): + id: str + + name: ToolExecutionStartedEventName + + arguments: str + + type: Annotated[ + Annotated[ + Literal["tool.execution.started"], + AfterValidator(validate_const("tool.execution.started")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "model", "agent_id"]) + nullable_fields = set(["model", "agent_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in 
nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolExecutionStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolfilechunk.py b/src/mistralai/client/models/toolfilechunk.py new file mode 100644 index 00000000..0708b3ff --- /dev/null +++ b/src/mistralai/client/models/toolfilechunk.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26c8aadf416a + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolFileChunkToolTypedDict = TypeAliasType( + "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str] +) + + +ToolFileChunkTool = TypeAliasType("ToolFileChunkTool", Union[BuiltInConnectors, str]) + + +class ToolFileChunkTypedDict(TypedDict): + tool: ToolFileChunkToolTypedDict + file_id: str + type: Literal["tool_file"] + file_name: NotRequired[Nullable[str]] + file_type: NotRequired[Nullable[str]] + + +class ToolFileChunk(BaseModel): + tool: ToolFileChunkTool + + file_id: str + + type: Annotated[ + Annotated[ + Optional[Literal["tool_file"]], AfterValidator(validate_const("tool_file")) + ], + pydantic.Field(alias="type"), + ] = "tool_file" + + file_name: OptionalNullable[str] = UNSET + + file_type: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def 
serialize_model(self, handler): + optional_fields = set(["type", "file_name", "file_type"]) + nullable_fields = set(["file_name", "file_type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolFileChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py new file mode 100644 index 00000000..05a0ee63 --- /dev/null +++ b/src/mistralai/client/models/toolmessage.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 15f1af161031 + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) + + +class ToolMessageTypedDict(TypedDict): + content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + + +class ToolMessage(BaseModel): + content: 
Nullable[ToolMessageContent] + + role: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolreferencechunk.py b/src/mistralai/client/models/toolreferencechunk.py new file mode 100644 index 00000000..95454fe8 --- /dev/null +++ b/src/mistralai/client/models/toolreferencechunk.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 822e9f3e70de + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolReferenceChunkToolTypedDict = TypeAliasType( + "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str] +) + + +ToolReferenceChunkTool = TypeAliasType( + "ToolReferenceChunkTool", Union[BuiltInConnectors, str] +) + + +class ToolReferenceChunkTypedDict(TypedDict): + tool: ToolReferenceChunkToolTypedDict + title: str + type: Literal["tool_reference"] + url: NotRequired[Nullable[str]] + favicon: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class ToolReferenceChunk(BaseModel): + tool: ToolReferenceChunkTool + + title: str + + type: Annotated[ + Annotated[ + Optional[Literal["tool_reference"]], + AfterValidator(validate_const("tool_reference")), + ], + pydantic.Field(alias="type"), + ] = "tool_reference" + + url: OptionalNullable[str] = UNSET + + favicon: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "url", "favicon", "description"]) + nullable_fields = set(["url", "favicon", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/tooltypes.py b/src/mistralai/client/models/tooltypes.py new file mode 100644 index 00000000..e601c196 --- /dev/null +++ b/src/mistralai/client/models/tooltypes.py @@ -0,0 +1,9 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 86c3b54272fd + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/src/mistralai/client/models/trainingfile.py b/src/mistralai/client/models/trainingfile.py new file mode 100644 index 00000000..2faeda8b --- /dev/null +++ b/src/mistralai/client/models/trainingfile.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2edf9bce227d + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class TrainingFileTypedDict(TypedDict): + file_id: str + weight: NotRequired[float] + + +class TrainingFile(BaseModel): + file_id: str + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["weight"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/transcriptionresponse.py b/src/mistralai/client/models/transcriptionresponse.py new file mode 100644 index 00000000..70315463 --- /dev/null +++ b/src/mistralai/client/models/transcriptionresponse.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60896dbc6345 + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class TranscriptionResponseTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + + +class TranscriptionResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git 
a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py new file mode 100644 index 00000000..b87bfc2f --- /dev/null +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -0,0 +1,93 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d1e6f3bdc74b + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class TranscriptionSegmentChunkTypedDict(TypedDict): + text: str + start: float + end: float + type: Literal["transcription_segment"] + score: NotRequired[Nullable[float]] + speaker_id: NotRequired[Nullable[str]] + + +class TranscriptionSegmentChunk(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + type: Annotated[ + Annotated[ + Optional[Literal["transcription_segment"]], + AfterValidator(validate_const("transcription_segment")), + ], + pydantic.Field(alias="type"), + ] = "transcription_segment" + + score: OptionalNullable[float] = UNSET + + speaker_id: OptionalNullable[str] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "score", "speaker_id"]) 
+ nullable_fields = set(["score", "speaker_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + TranscriptionSegmentChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py new file mode 100644 index 00000000..e3c50169 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 066a9158ed09 + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class TranscriptionStreamDoneTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + type: Literal["transcription.done"] + + +class TranscriptionStreamDone(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" 
+ ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + type: Annotated[ + Annotated[ + Literal["transcription.done"], + AfterValidator(validate_const("transcription.done")), + ], + pydantic.Field(alias="type"), + ] = "transcription.done" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + TranscriptionStreamDone.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py new file mode 100644 index 00000000..073fd99a --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
# Dict-shaped union of all known transcription stream event payloads.
TranscriptionStreamEventsDataTypedDict = TypeAliasType(
    "TranscriptionStreamEventsDataTypedDict",
    Union[
        TranscriptionStreamTextDeltaTypedDict,
        TranscriptionStreamLanguageTypedDict,
        TranscriptionStreamSegmentDeltaTypedDict,
        TranscriptionStreamDoneTypedDict,
    ],
)


class UnknownTranscriptionStreamEventsData(BaseModel):
    r"""A TranscriptionStreamEventsData variant the SDK doesn't recognize.
    Preserves the raw payload."""

    type: Literal["UNKNOWN"] = "UNKNOWN"
    raw: Any  # the untouched, unparsed payload as received
    is_unknown: Literal[True] = True

    # Frozen: unknown payloads are read-only snapshots.
    model_config = ConfigDict(frozen=True)


# Maps the "type" discriminator value to the concrete model class.
_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS: dict[str, Any] = {
    "transcription.done": TranscriptionStreamDone,
    "transcription.language": TranscriptionStreamLanguage,
    "transcription.segment": TranscriptionStreamSegmentDelta,
    "transcription.text.delta": TranscriptionStreamTextDelta,
}


# Open union: unrecognized "type" values fall back to the Unknown variant
# instead of raising, so new server-side event types do not break clients.
TranscriptionStreamEventsData = Annotated[
    Union[
        TranscriptionStreamDone,
        TranscriptionStreamLanguage,
        TranscriptionStreamSegmentDelta,
        TranscriptionStreamTextDelta,
        UnknownTranscriptionStreamEventsData,
    ],
    BeforeValidator(
        partial(
            parse_open_union,
            disc_key="type",
            variants=_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS,
            unknown_cls=UnknownTranscriptionStreamEventsData,
            union_name="TranscriptionStreamEventsData",
        )
    ),
]


class TranscriptionStreamEventsTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`TranscriptionStreamEvents`."""

    event: TranscriptionStreamEventTypes
    data: TranscriptionStreamEventsDataTypedDict


class TranscriptionStreamEvents(BaseModel):
    r"""One server-sent event of a transcription stream: event name + payload."""

    event: TranscriptionStreamEventTypes

    data: TranscriptionStreamEventsData
class TranscriptionStreamLanguageTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`TranscriptionStreamLanguage`."""

    audio_language: str
    type: Literal["transcription.language"]


class TranscriptionStreamLanguage(BaseModel):
    r"""Stream event announcing the detected language of the audio."""

    # extra="allow" keeps unknown wire fields in __pydantic_extra__.
    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    audio_language: str

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[
            Literal["transcription.language"],
            AfterValidator(validate_const("transcription.language")),
        ],
        pydantic.Field(alias="type"),
    ] = "transcription.language"

    @property
    def additional_properties(self):
        # Convenience accessor for undeclared fields captured via extra="allow".
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
class TranscriptionStreamSegmentDeltaTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`TranscriptionStreamSegmentDelta`."""

    text: str
    start: float
    end: float
    type: Literal["transcription.segment"]
    speaker_id: NotRequired[Nullable[str]]


class TranscriptionStreamSegmentDelta(BaseModel):
    r"""Stream event carrying one timed transcription segment."""

    # extra="allow" keeps unknown wire fields in __pydantic_extra__.
    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    text: str

    start: float

    end: float

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[
            Literal["transcription.segment"],
            AfterValidator(validate_const("transcription.segment")),
        ],
        pydantic.Field(alias="type"),
    ] = "transcription.segment"

    speaker_id: OptionalNullable[str] = UNSET

    @property
    def additional_properties(self):
        # Convenience accessor for undeclared fields captured via extra="allow".
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null); re-append undeclared extra properties after
        the declared fields."""
        optional_fields = set(["speaker_id"])
        nullable_fields = set(["speaker_id"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            serialized.pop(k, None)  # whatever remains afterwards is extra data
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val
        for k, v in serialized.items():
            m[k] = v

        return m


# Resolve forward references once all sibling models are imported.
try:
    TranscriptionStreamSegmentDelta.model_rebuild()
except NameError:
    pass
class UnarchiveModelResponseTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`UnarchiveModelResponse`."""

    id: str
    object: Literal["model"]
    archived: NotRequired[bool]


class UnarchiveModelResponse(BaseModel):
    r"""Response confirming a fine-tuned model was unarchived."""

    id: str

    # Constant "model" object tag; validate_const rejects any other value.
    object: Annotated[
        Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))],
        pydantic.Field(alias="object"),
    ] = "model"

    # False after a successful unarchive.
    archived: Optional[bool] = False

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET, and omit optional
        fields that serialized as None."""
        optional_fields = set(["object", "archived"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


# Resolve forward references once all sibling models are imported.
try:
    UnarchiveModelResponse.model_rebuild()
except NameError:
    pass
# Dict-shaped union of every tool kind an agent may be updated with.
UpdateAgentRequestToolTypedDict = TypeAliasType(
    "UpdateAgentRequestToolTypedDict",
    Union[
        FunctionToolTypedDict,
        WebSearchToolTypedDict,
        WebSearchPremiumToolTypedDict,
        CodeInterpreterToolTypedDict,
        ImageGenerationToolTypedDict,
        DocumentLibraryToolTypedDict,
    ],
)


# Model union discriminated on each tool's "type" field.
UpdateAgentRequestTool = Annotated[
    Union[
        CodeInterpreterTool,
        DocumentLibraryTool,
        FunctionTool,
        ImageGenerationTool,
        WebSearchTool,
        WebSearchPremiumTool,
    ],
    Field(discriminator="type"),
]


class UpdateAgentRequestTypedDict(TypedDict):
    instructions: NotRequired[Nullable[str]]
    r"""Instruction prompt the model will follow during the conversation."""
    tools: NotRequired[List[UpdateAgentRequestToolTypedDict]]
    r"""List of tools which are available to the model during the conversation."""
    completion_args: NotRequired[CompletionArgsTypedDict]
    r"""White-listed arguments from the completion API"""
    model: NotRequired[Nullable[str]]
    name: NotRequired[Nullable[str]]
    description: NotRequired[Nullable[str]]
    handoffs: NotRequired[Nullable[List[str]]]
    deployment_chat: NotRequired[Nullable[bool]]
    metadata: NotRequired[Nullable[Dict[str, Any]]]
    version_message: NotRequired[Nullable[str]]


class UpdateAgentRequest(BaseModel):
    r"""Partial-update payload for an agent; every field is optional and only
    the fields the caller sets are sent."""

    instructions: OptionalNullable[str] = UNSET
    r"""Instruction prompt the model will follow during the conversation."""

    tools: Optional[List[UpdateAgentRequestTool]] = None
    r"""List of tools which are available to the model during the conversation."""

    completion_args: Optional[CompletionArgs] = None
    r"""White-listed arguments from the completion API"""

    model: OptionalNullable[str] = UNSET

    name: OptionalNullable[str] = UNSET

    description: OptionalNullable[str] = UNSET

    handoffs: OptionalNullable[List[str]] = UNSET

    deployment_chat: OptionalNullable[bool] = UNSET

    metadata: OptionalNullable[Dict[str, Any]] = UNSET

    version_message: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(
            [
                "instructions",
                "tools",
                "completion_args",
                "model",
                "name",
                "description",
                "handoffs",
                "deployment_chat",
                "metadata",
                "version_message",
            ]
        )
        nullable_fields = set(
            [
                "instructions",
                "model",
                "name",
                "description",
                "handoffs",
                "deployment_chat",
                "metadata",
                "version_message",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
# Dict-shaped union of the scalar/list value types a document attribute may hold.
AttributesTypedDict = TypeAliasType(
    "AttributesTypedDict",
    Union[
        bool, str, int, float, datetime, List[str], List[int], List[float], List[bool]
    ],
)


# Model-side union of the scalar/list value types a document attribute may hold.
Attributes = TypeAliasType(
    "Attributes",
    Union[
        bool, str, int, float, datetime, List[str], List[int], List[float], List[bool]
    ],
)


class UpdateDocumentRequestTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`UpdateDocumentRequest`."""

    name: NotRequired[Nullable[str]]
    attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]]


class UpdateDocumentRequest(BaseModel):
    r"""Partial-update payload for a library document; only fields the caller
    sets are sent."""

    name: OptionalNullable[str] = UNSET

    attributes: OptionalNullable[Dict[str, Attributes]] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(["name", "attributes"])
        nullable_fields = set(["name", "attributes"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
class UpdateLibraryRequestTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`UpdateLibraryRequest`."""

    name: NotRequired[Nullable[str]]
    description: NotRequired[Nullable[str]]


class UpdateLibraryRequest(BaseModel):
    r"""Partial-update payload for a document library; only fields the caller
    sets are sent."""

    name: OptionalNullable[str] = UNSET

    description: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(["name", "description"])
        nullable_fields = set(["name", "description"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
class UpdateModelRequestTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`UpdateModelRequest`."""

    name: NotRequired[Nullable[str]]
    description: NotRequired[Nullable[str]]


class UpdateModelRequest(BaseModel):
    r"""Partial-update payload for a fine-tuned model; only fields the caller
    sets are sent."""

    name: OptionalNullable[str] = UNSET

    description: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(["name", "description"])
        nullable_fields = set(["name", "description"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
class UsageInfoTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`UsageInfo`."""

    prompt_tokens: NotRequired[int]
    completion_tokens: NotRequired[int]
    total_tokens: NotRequired[int]
    prompt_audio_seconds: NotRequired[Nullable[int]]


class UsageInfo(BaseModel):
    r"""Token/audio usage accounting attached to API responses."""

    # extra="allow" keeps unknown wire fields in __pydantic_extra__.
    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    prompt_tokens: Optional[int] = 0

    completion_tokens: Optional[int] = 0

    total_tokens: Optional[int] = 0

    prompt_audio_seconds: OptionalNullable[int] = UNSET

    @property
    def additional_properties(self):
        # Convenience accessor for undeclared fields captured via extra="allow".
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null); re-append undeclared extra properties after
        the declared fields."""
        optional_fields = set(
            [
                "prompt_tokens",
                "completion_tokens",
                "total_tokens",
                "prompt_audio_seconds",
            ]
        )
        nullable_fields = set(["prompt_audio_seconds"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            serialized.pop(k, None)  # whatever remains afterwards is extra data
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val
        for k, v in serialized.items():
            m[k] = v

        return m
# Dict-shaped content: either a plain string or a list of content chunks.
UserMessageContentTypedDict = TypeAliasType(
    "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
)


# Model-side content: either a plain string or a list of content chunks.
UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]])


class UserMessageTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`UserMessage`."""

    content: Nullable[UserMessageContentTypedDict]
    role: Literal["user"]


class UserMessage(BaseModel):
    r"""A chat message authored by the end user."""

    # Required but nullable: an explicit None is a valid content value.
    content: Nullable[UserMessageContent]

    # Constant role; validate_const rejects any other value.
    role: Annotated[
        Annotated[Literal["user"], AfterValidator(validate_const("user"))],
        pydantic.Field(alias="role"),
    ] = "user"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: emit every field except those left at the
        UNSET sentinel (there are no optional fields to drop here)."""
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                m[k] = val

        return m


# Resolve forward references once all sibling models are imported.
try:
    UserMessage.model_rebuild()
except NameError:
    pass
# A location path element: either a field name (str) or a list index (int).
LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int])


# A location path element: either a field name (str) or a list index (int).
Loc = TypeAliasType("Loc", Union[str, int])


class ValidationErrorTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`ValidationError`."""

    loc: List[LocTypedDict]
    msg: str
    type: str


class ValidationError(BaseModel):
    r"""A single server-side validation error entry."""

    # Path to the offending input element, e.g. ["body", "messages", 0].
    loc: List[Loc]

    # Human-readable error message.
    msg: str

    # Machine-readable error category.
    type: str
class WandbIntegration(BaseModel):
    r"""Weights & Biases integration settings attached to a fine-tuning job."""

    project: str
    r"""The name of the project that the new run will be created under."""

    api_key: str
    r"""The WandB API key to use for authentication."""

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))],
        pydantic.Field(alias="type"),
    ] = "wandb"

    name: OptionalNullable[str] = UNSET
    r"""A display name to set for the run. If not set, will use the job ID as the name."""

    run_name: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(["name", "run_name"])
        nullable_fields = set(["name", "run_name"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references once all sibling models are imported.
try:
    WandbIntegration.model_rebuild()
except NameError:
    pass
class WandbIntegrationResultTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`WandbIntegrationResult`."""

    project: str
    r"""The name of the project that the new run will be created under."""
    type: Literal["wandb"]
    name: NotRequired[Nullable[str]]
    r"""A display name to set for the run. If not set, will use the job ID as the name."""
    run_name: NotRequired[Nullable[str]]
    url: NotRequired[Nullable[str]]


class WandbIntegrationResult(BaseModel):
    r"""Weights & Biases integration details as returned by the API (no API
    key; includes the run URL when available)."""

    project: str
    r"""The name of the project that the new run will be created under."""

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))],
        pydantic.Field(alias="type"),
    ] = "wandb"

    name: OptionalNullable[str] = UNSET
    r"""A display name to set for the run. If not set, will use the job ID as the name."""

    run_name: OptionalNullable[str] = UNSET

    url: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(["name", "run_name", "url"])
        nullable_fields = set(["name", "run_name", "url"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references once all sibling models are imported.
try:
    WandbIntegrationResult.model_rebuild()
except NameError:
    pass
class WebSearchPremiumToolTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`WebSearchPremiumTool`."""

    tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]]
    type: Literal["web_search_premium"]


class WebSearchPremiumTool(BaseModel):
    r"""Built-in premium web-search tool declaration for agents."""

    tool_configuration: OptionalNullable[ToolConfiguration] = UNSET

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[
            Literal["web_search_premium"],
            AfterValidator(validate_const("web_search_premium")),
        ],
        pydantic.Field(alias="type"),
    ] = "web_search_premium"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(["tool_configuration"])
        nullable_fields = set(["tool_configuration"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references once all sibling models are imported.
try:
    WebSearchPremiumTool.model_rebuild()
except NameError:
    pass
class WebSearchToolTypedDict(TypedDict):
    r"""Dict-shaped form of :class:`WebSearchTool`."""

    tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]]
    type: Literal["web_search"]


class WebSearchTool(BaseModel):
    r"""Built-in web-search tool declaration for agents."""

    tool_configuration: OptionalNullable[ToolConfiguration] = UNSET

    # Constant discriminator; validate_const rejects any other value.
    type: Annotated[
        Annotated[Literal["web_search"], AfterValidator(validate_const("web_search"))],
        pydantic.Field(alias="type"),
    ] = "web_search"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        r"""Wrap-mode serializer: omit fields left at UNSET; omit optional
        fields serialized as None unless they are nullable and were explicitly
        set (an intentional null)."""
        optional_fields = set(["tool_configuration"])
        nullable_fields = set(["tool_configuration"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # wire name: alias wins over the Python name
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:  # UNSET means "omit from output"
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references once all sibling models are imported.
try:
    WebSearchTool.model_rebuild()
except NameError:
    pass
DO NOT EDIT.""" +# @generated-id: 1d277958a843 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional + + +class Models(BaseSDK): + r"""Model Management API""" + + def list( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModelList: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="list_models_v1_models_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = 
None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModelList: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="list_models_v1_models_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def retrieve( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: + r"""Retrieve Model + + Retrieve information about a model. + + :param model_id: The ID of the model to retrieve. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self._build_request( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + 
if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def retrieve_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: + r"""Retrieve Model + + Retrieve information about a model. + + :param model_id: The ID of the model to retrieve. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", 
"504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteModelOut: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteModelOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteModelOut: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteModelOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. 
+ :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_model_request=models.UpdateModelRequest( + name=name, + description=description, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_model_request=models.UpdateModelRequest( + name=name, + description=description, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def archive( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ArchiveModelResponse: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ArchiveModelResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", 
http_res) + + async def archive_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ArchiveModelResponse: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ArchiveModelResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def unarchive( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UnarchiveModelResponse: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response 
received", http_res) + + async def unarchive_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UnarchiveModelResponse: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py new file mode 100644 index 00000000..a46119d1 --- /dev/null +++ b/src/mistralai/client/ocr.py @@ -0,0 +1,284 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2f804a12fc62 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import Nullable, OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + r"""OCR API""" + + def process( + self, + *, + model: Nullable[str], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.DocumentUnion), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def process_async( + self, + *, + model: Nullable[str], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to 
process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.DocumentUnion), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request_async( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/py.typed b/src/mistralai/client/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/src/mistralai/client/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py new file mode 100644 index 00000000..80bf25a7 --- /dev/null +++ b/src/mistralai/client/sdk.py @@ -0,0 +1,223 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 48edbcb38d7e + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, get_default_logger +from .utils.retries import RetryConfig +import httpx +import importlib +from mistralai.client import models as models_, utils +from mistralai.client._hooks import SDKHooks +from mistralai.client.types import OptionalNullable, UNSET +import sys +from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import weakref + +if TYPE_CHECKING: + from mistralai.client.agents import Agents + from mistralai.client.audio import Audio + from mistralai.client.batch import Batch + from mistralai.client.beta import Beta + from mistralai.client.chat import Chat + from mistralai.client.classifiers import Classifiers + from mistralai.client.embeddings import Embeddings + from mistralai.client.files import Files + from mistralai.client.fim import Fim + from mistralai.client.fine_tuning import FineTuning + from mistralai.client.models_ import Models + from mistralai.client.ocr import Ocr + + +class Mistral(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + + models: "Models" + r"""Model Management API""" + beta: "Beta" + files: "Files" + r"""Files API""" + fine_tuning: "FineTuning" + batch: "Batch" + chat: "Chat" + r"""Chat Completion API.""" + fim: "Fim" + r"""Fill-in-the-middle API.""" + agents: "Agents" + r"""Agents API.""" + embeddings: "Embeddings" + r"""Embeddings API.""" + classifiers: "Classifiers" + r"""Classifiers API.""" + ocr: "Ocr" + r"""OCR API""" + audio: "Audio" + _sub_sdk_map = { + "models": ("mistralai.client.models_", "Models"), + "beta": ("mistralai.client.beta", "Beta"), + "files": ("mistralai.client.files", "Files"), + "fine_tuning": ("mistralai.client.fine_tuning", "FineTuning"), + "batch": ("mistralai.client.batch", "Batch"), + "chat": ("mistralai.client.chat", "Chat"), + "fim": ("mistralai.client.fim", "Fim"), + "agents": ("mistralai.client.agents", "Agents"), + "embeddings": ("mistralai.client.embeddings", "Embeddings"), + "classifiers": ("mistralai.client.classifiers", "Classifiers"), + "ocr": ("mistralai.client.ocr", "Ocr"), + "audio": ("mistralai.client.audio", "Audio"), + } + + def __init__( + self, + api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. 
+ + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + client_supplied = True + if client is None: + client = httpx.Client(follow_redirects=True) + client_supplied = False + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + async_client_supplied = True + if async_client is None: + async_client = httpx.AsyncClient(follow_redirects=True) + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ + security: Any = None + if callable(api_key): + # pylint: disable=unnecessary-lambda-assignment + security = lambda: models_.Security(api_key=api_key()) + else: + security = models_.Security(api_key=api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + client_supplied=client_supplied, + async_client=async_client, + async_client_supplied=async_client_supplied, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + parent_ref=self, + ) + + hooks = SDKHooks() + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + + def dynamic_import(self, modname, retries=3): + for attempt in range(retries): + try: + return importlib.import_module(modname) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except 
ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/src/mistralai/client/sdkconfiguration.py b/src/mistralai/client/sdkconfiguration.py new file mode 100644 index 00000000..712e92e0 --- /dev/null +++ b/src/mistralai/client/sdkconfiguration.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b7dd68a0235e + +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai.client import models +from mistralai.client.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_EU = "eu" +r"""EU Production server""" +SERVERS = { + SERVER_EU: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool + debug_logger: Logger + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_EU + + if self.server not in SERVERS: + raise ValueError(f'Invalid server "{self.server}"') + + return SERVERS[self.server], {} diff --git a/src/mistralai/client/transcriptions.py b/src/mistralai/client/transcriptions.py new file mode 100644 index 00000000..7f01917d --- /dev/null +++ b/src/mistralai/client/transcriptions.py @@ -0,0 +1,470 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 75b45780c978 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Transcriptions(BaseSDK): + r"""API for audio transcription.""" + + def complete( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: ID of the model to be used. + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.TranscriptionResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: ID of the model to be used. + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.TranscriptionResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: + r"""Create Streaming Transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, 
models.TranscriptionStreamEvents), + client_ref=self, + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: + r"""Create Streaming Transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: 7ec465a1d3ff

from pydantic import ConfigDict, model_serializer
from pydantic import BaseModel as PydanticBaseModel
from pydantic_core import core_schema
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union
from typing_extensions import TypeAliasType, TypeAlias


class BaseModel(PydanticBaseModel):
    """Base class for all generated models; shares one pydantic config."""

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=()
    )


class Unset(BaseModel):
    """Sentinel model distinguishing "field not provided" from an explicit None."""

    @model_serializer(mode="plain")
    def serialize_model(self):
        # Serialize to a recognizable sentinel string so unset fields can be
        # stripped from payloads later.
        return UNSET_SENTINEL

    def __bool__(self) -> Literal[False]:
        # Unset is always falsy, mirroring "no value".
        return False


UNSET = Unset()
UNSET_SENTINEL = "~?~unset~?~sentinel~?~"


T = TypeVar("T")
if TYPE_CHECKING:
    # Plain aliases keep type checkers happy without TypeAliasType support.
    Nullable: TypeAlias = Union[T, None]
    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
else:
    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
    OptionalNullable = TypeAliasType(
        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
    )


class UnrecognizedStr(str):
    """A str subclass unions fall back to when no more specific member matches."""

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source_type: Any, _handler: Any
    ) -> core_schema.CoreSchema:
        def _coerce(v: Any) -> "UnrecognizedStr":
            # Idempotent wrap: already-wrapped values pass through.
            return v if isinstance(v, cls) else cls(str(v))

        # Strict mode always fails (none_schema) so pydantic prefers other
        # union members (e.g. Literals) in strict union discrimination; lax
        # mode accepts any string value as a fallback.
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema(
                [
                    core_schema.str_schema(),
                    core_schema.no_info_plain_validator_function(_coerce),
                ]
            ),
            strict_schema=core_schema.none_schema(),
        )


class UnrecognizedInt(int):
    """An int subclass unions fall back to when no more specific member matches."""

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source_type: Any, _handler: Any
    ) -> core_schema.CoreSchema:
        def _coerce(v: Any) -> "UnrecognizedInt":
            return v if isinstance(v, cls) else cls(int(v))

        # Same strict-fails / lax-accepts strategy as UnrecognizedStr.
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema(
                [
                    core_schema.int_schema(),
                    core_schema.no_info_plain_validator_function(_coerce),
                ]
            ),
            strict_schema=core_schema.none_schema(),
        )
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: b69505f4b269

from typing import Any, TYPE_CHECKING, Callable, TypeVar
import asyncio

from .dynamic_imports import lazy_getattr, lazy_dir

_T = TypeVar("_T")


async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T:
    """Run a synchronous function in a thread pool to avoid blocking the event loop."""
    return await asyncio.to_thread(func, *args)


if TYPE_CHECKING:
    # Static-analysis-only imports. At runtime every one of these names is
    # resolved lazily through module-level __getattr__ below.
    from .annotations import get_discriminator
    from .datetimes import parse_datetime
    from .enums import OpenEnumMeta
    from .unions import parse_open_union
    from .headers import get_headers, get_response_headers
    from .metadata import (
        FieldMetadata,
        find_metadata,
        FormMetadata,
        HeaderMetadata,
        MultipartFormMetadata,
        PathParamMetadata,
        QueryParamMetadata,
        RequestMetadata,
        SecurityMetadata,
    )
    from .queryparams import get_query_params
    from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig
    from .requestbodies import serialize_request_body, SerializedRequestBody
    from .security import get_security, get_security_from_env
    from .serializers import (
        get_pydantic_model,
        marshal_json,
        unmarshal,
        unmarshal_json,
        serialize_decimal,
        serialize_float,
        serialize_int,
        stream_to_text,
        stream_to_text_async,
        stream_to_bytes,
        stream_to_bytes_async,
        validate_const,
        validate_decimal,
        validate_float,
        validate_int,
    )
    from .url import generate_url, template_url, remove_suffix
    from .values import (
        get_global_from_env,
        match_content_type,
        match_status_codes,
        match_response,
        cast_partial,
    )
    from .logger import Logger, get_body_content, get_default_logger

__all__ = [
    "BackoffStrategy",
    "FieldMetadata",
    "find_metadata",
    "FormMetadata",
    "generate_url",
    "get_body_content",
    "get_default_logger",
    "get_discriminator",
    "parse_datetime",
    "get_global_from_env",
    "get_headers",
    "get_pydantic_model",
    "get_query_params",
    "get_response_headers",
    "get_security",
    "get_security_from_env",
    "HeaderMetadata",
    "Logger",
    "marshal_json",
    "match_content_type",
    "match_status_codes",
    "match_response",
    "MultipartFormMetadata",
    "OpenEnumMeta",
    "parse_open_union",
    "PathParamMetadata",
    "QueryParamMetadata",
    "remove_suffix",
    "Retries",
    "retry",
    "retry_async",
    "RetryConfig",
    "RequestMetadata",
    "SecurityMetadata",
    "serialize_decimal",
    "serialize_float",
    "serialize_int",
    "serialize_request_body",
    "SerializedRequestBody",
    "stream_to_text",
    "stream_to_text_async",
    "stream_to_bytes",
    "stream_to_bytes_async",
    "template_url",
    "unmarshal",
    "unmarshal_json",
    "validate_decimal",
    "validate_const",
    "validate_float",
    "validate_int",
    "cast_partial",
]

# Submodule -> names defined there. Inverted below into the flat name->module
# table consumed by lazy_getattr / lazy_dir.
_exports_by_module = {
    ".annotations": ("get_discriminator",),
    ".datetimes": ("parse_datetime",),
    ".enums": ("OpenEnumMeta",),
    ".headers": ("get_headers", "get_response_headers"),
    ".logger": ("Logger", "get_body_content", "get_default_logger"),
    ".metadata": (
        "FieldMetadata",
        "find_metadata",
        "FormMetadata",
        "HeaderMetadata",
        "MultipartFormMetadata",
        "PathParamMetadata",
        "QueryParamMetadata",
        "RequestMetadata",
        "SecurityMetadata",
    ),
    ".queryparams": ("get_query_params",),
    ".requestbodies": ("serialize_request_body", "SerializedRequestBody"),
    ".retries": ("BackoffStrategy", "Retries", "retry", "retry_async", "RetryConfig"),
    ".security": ("get_security", "get_security_from_env"),
    ".serializers": (
        "get_pydantic_model",
        "marshal_json",
        "unmarshal",
        "unmarshal_json",
        "serialize_decimal",
        "serialize_float",
        "serialize_int",
        "stream_to_text",
        "stream_to_text_async",
        "stream_to_bytes",
        "stream_to_bytes_async",
        "validate_const",
        "validate_decimal",
        "validate_float",
        "validate_int",
    ),
    ".unions": ("parse_open_union",),
    ".url": ("generate_url", "template_url", "remove_suffix"),
    ".values": (
        "get_global_from_env",
        "match_content_type",
        "match_status_codes",
        "match_response",
        "cast_partial",
    ),
}

_dynamic_imports: dict[str, str] = {
    name: module for module, names in _exports_by_module.items() for name in names
}


def __getattr__(attr_name: str) -> Any:
    """Resolve exported names on first access via the lazy-import machinery."""
    return lazy_getattr(
        attr_name, package=__package__, dynamic_imports=_dynamic_imports
    )


def __dir__():
    return lazy_dir(dynamic_imports=_dynamic_imports)
def get_discriminator(model: Any, fieldname: str, key: str) -> str:
    """
    Recursively search for the discriminator attribute in a model.

    Args:
        model (Any): The model to search within.
        fieldname (str): The name of the field to search for.
        key (str): The key to search for in dictionaries.

    Returns:
        str: The name of the discriminator attribute.

    Raises:
        ValueError: If the discriminator attribute is not found.
    """
    upper_fieldname = fieldname.upper()

    def get_field_discriminator(field: Any) -> Optional[str]:
        """Search for the discriminator attribute in a given field."""
        if isinstance(field, dict) and key in field:
            return f"{field[key]}"

        # Check both the lower- and upper-case attribute spellings; enum
        # members are unwrapped to their value.
        for attr_name in (fieldname, upper_fieldname):
            if hasattr(field, attr_name):
                attr = getattr(field, attr_name)
                if isinstance(attr, Enum):
                    return f"{attr.value}"
                return f"{attr}"

        return None

    def search_nested_discriminator(obj: Any) -> Optional[str]:
        """Recursively search for the discriminator in nested dicts/lists."""
        # First try direct field lookup.
        discriminator = get_field_discriminator(obj)
        if discriminator is not None:
            return discriminator

        # If it's a dict, search inside nested values (dicts and lists only).
        if isinstance(obj, dict):
            for value in obj.values():
                if isinstance(value, list):
                    for item in value:
                        nested = search_nested_discriminator(item)
                        if nested is not None:
                            return nested
                elif isinstance(value, dict):
                    nested = search_nested_discriminator(value)
                    if nested is not None:
                        return nested

        return None

    if isinstance(model, list):
        for field in model:
            discriminator = search_nested_discriminator(field)
            if discriminator is not None:
                return discriminator

    discriminator = search_nested_discriminator(model)
    if discriminator is not None:
        return discriminator

    raise ValueError(f"Could not find discriminator field {fieldname} in {model}")


def parse_datetime(datetime_string: str) -> datetime:
    """
    Convert an RFC 3339 / ISO 8601 formatted string into a datetime object.

    Python 3.11+ parses RFC 3339 directly with datetime.fromisoformat(); for
    earlier versions this function normalizes the timezone suffix first.

    :param datetime_string: RFC 3339 / ISO 8601 timestamp.
    :raises ValueError: If the string is not a valid timestamp.
    """
    # Python 3.11 and later can parse RFC 3339 directly.
    if sys.version_info >= (3, 11):
        return datetime.fromisoformat(datetime_string)

    # For Python 3.10 and earlier, fromisoformat rejects the 'Z'/'z' UTC
    # suffix (RFC 3339 allows either case), so rewrite it as an offset.
    if datetime_string.endswith(("Z", "z")):
        datetime_string = datetime_string[:-1] + "+00:00"

    return datetime.fromisoformat(datetime_string)
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: ac9918d925c0

from importlib import import_module
import builtins
import sys


def dynamic_import(package, modname, retries=3):
    """Import `modname` relative to `package`, retrying when a half-initialized
    module left a stale sys.modules entry behind (surfaces as KeyError)."""
    attempt = 0
    while attempt < retries:
        try:
            return import_module(modname, package)
        except KeyError:
            # Evict the stale entry and retry the import from scratch.
            sys.modules.pop(modname, None)
            attempt += 1
    raise KeyError(f"Failed to import module '{modname}' after {retries} attempts")


def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None):
    """Module-level __getattr__ that lazily loads from a dynamic_imports mapping.

    Args:
        attr_name: The attribute being looked up.
        package: The caller's __package__ (for relative imports).
        dynamic_imports: Dict mapping attribute names to relative module paths.
        sub_packages: Optional list of subpackage names to lazy-load.
    """
    target_module = dynamic_imports.get(attr_name)
    if target_module is None:
        # Not a lazily-exported name; maybe it is a lazy subpackage.
        if sub_packages and attr_name in sub_packages:
            return import_module(f".{attr_name}", package)
        raise AttributeError(f"module '{package}' has no attribute '{attr_name}'")

    try:
        module = dynamic_import(package, target_module)
        return getattr(module, attr_name)
    except ImportError as e:
        raise ImportError(
            f"Failed to import {attr_name} from {target_module}: {e}"
        ) from e
    except AttributeError as e:
        raise AttributeError(
            f"Failed to get {attr_name} from {target_module}: {e}"
        ) from e


def lazy_dir(*, dynamic_imports, sub_packages=None):
    """Module-level __dir__ that lists lazily-loadable attributes, sorted."""
    names = builtins.list(dynamic_imports)
    if sub_packages:
        names += builtins.list(sub_packages)
    return builtins.sorted(names)
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: a0735873b5ac

import enum
import sys
from typing import Any

from pydantic_core import core_schema


class OpenEnumMeta(enum.EnumMeta):
    """Metaclass for "open" enums: looking up an unknown value returns the raw
    value instead of raising ValueError, so new server-side values don't break
    deserialization."""

    # The __call__ method `boundary` kwarg was added in 3.11 and must be present
    # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622
    # pylint: disable=unexpected-keyword-arg
    # The __call__ method `values` varg must be named for pyright.
    # pylint: disable=keyword-arg-before-vararg

    if sys.version_info >= (3, 11):

        def __call__(
            cls,
            value,
            names=None,
            *values,
            module=None,
            qualname=None,
            type=None,
            start=1,
            boundary=None,
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            # names is not None => functional API (creating a new Enum class);
            # defer entirely to the stock EnumMeta behavior.
            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )

            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )
            except ValueError:
                # Open-enum behavior: unknown member values pass through as-is.
                return value
    else:

        def __call__(
            cls, value, names=None, *, module=None, qualname=None, type=None, start=1
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            # names is not None => functional API (creating a new Enum class);
            # defer entirely to the stock EnumMeta behavior.
            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )

            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )
            except ValueError:
                # Open-enum behavior: unknown member values pass through as-is.
                return value

    def __new__(mcs, name, bases, namespace, **kwargs):
        cls = super().__new__(mcs, name, bases, namespace, **kwargs)

        # Add __get_pydantic_core_schema__ to make open enums work correctly
        # in union discrimination. In strict mode (used by Pydantic for unions),
        # only known enum values match. In lax mode, unknown values are accepted.
        def __get_pydantic_core_schema__(
            cls_inner: Any, _source_type: Any, _handler: Any
        ) -> core_schema.CoreSchema:
            # Create a validator that only accepts known enum values (for strict mode)
            def validate_strict(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                # Use the parent EnumMeta's __call__ which raises ValueError for unknown values
                return enum.EnumMeta.__call__(cls_inner, v)

            # Create a lax validator that accepts unknown values
            def validate_lax(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                try:
                    return enum.EnumMeta.__call__(cls_inner, v)
                except ValueError:
                    # Return the raw value for unknown enum values
                    return v

            # Determine the base type schema (str or int).
            # NOTE: walks the MRO because the enum's value mixin (str or int)
            # appears as a base class ahead of Enum itself.
            is_int_enum = False
            for base in cls_inner.__mro__:
                if base is int:
                    is_int_enum = True
                    break
                if base is str:
                    break

            base_schema = (
                core_schema.int_schema()
                if is_int_enum
                else core_schema.str_schema()
            )

            # Use lax_or_strict_schema:
            # - strict mode: only known enum values match (raises ValueError for unknown)
            # - lax mode: accept any value, return enum member or raw value
            return core_schema.lax_or_strict_schema(
                lax_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_lax)]
                ),
                strict_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_strict)]
                ),
            )

        setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__))
        return cls
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: 3263d7502030

import re
import json
from dataclasses import dataclass, asdict
from typing import (
    Any,
    Callable,
    Generic,
    TypeVar,
    Optional,
    Generator,
    AsyncGenerator,
    Tuple,
)
import httpx

T = TypeVar("T")


class EventStream(Generic[T]):
    """Synchronous iterator over server-sent events decoded as T."""

    # Holds a reference to the SDK client to avoid it being garbage collected,
    # which would terminate the underlying httpx client mid-stream.
    client_ref: Optional[object]
    response: httpx.Response
    generator: Generator[T, None, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self._closed = False
        self.client_ref = client_ref
        self.response = response
        self.generator = stream_events(response, decoder, sentinel)

    def __iter__(self):
        return self

    def __next__(self):
        # After close (context-manager exit) behave like an exhausted iterator.
        if self._closed:
            raise StopIteration
        return next(self.generator)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._closed = True
        self.response.close()


class EventStreamAsync(Generic[T]):
    """Asynchronous iterator over server-sent events decoded as T."""

    # Holds a reference to the SDK client to avoid it being garbage collected,
    # which would terminate the underlying httpx client mid-stream.
    client_ref: Optional[object]
    response: httpx.Response
    generator: AsyncGenerator[T, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self._closed = False
        self.client_ref = client_ref
        self.response = response
        self.generator = stream_events_async(response, decoder, sentinel)

    def __aiter__(self):
        return self

    async def __anext__(self):
        # After close (context-manager exit) behave like an exhausted iterator.
        if self._closed:
            raise StopAsyncIteration
        return await self.generator.__anext__()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        self._closed = True
        await self.response.aclose()


async def stream_events_async(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> AsyncGenerator[T, None]:
    """Yield decoded SSE events from an async response; stop at `sentinel`."""
    buf = bytearray()
    start = 0
    event_id: Optional[str] = None
    async for chunk in response.aiter_bytes():
        # Strip a UTF-8 BOM from the very first bytes of the stream.
        if not buf and chunk.startswith(UTF8_BOM):
            chunk = chunk[len(UTF8_BOM):]
        buf += chunk
        for i in range(start, len(buf)):
            if buf[i : i + 1] not in (b"\r", b"\n"):
                continue
            matched: Optional[bytes] = None
            for boundary in MESSAGE_BOUNDARIES:
                matched = _peek_sequence(i, buf, boundary)
                if matched is not None:
                    break
            if matched is None:
                continue

            block = buf[start:i]
            start = i + len(matched)
            event, discard, event_id = _parse_event(
                raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id
            )
            if event is not None:
                yield event
            if discard:
                # Sentinel reached: drop the connection and stop.
                await response.aclose()
                return

        if start > 0:
            buf = buf[start:]
            start = 0

    # Flush a trailing message that was not boundary-terminated.
    event, discard, _ = _parse_event(
        raw=buf, decoder=decoder, sentinel=sentinel, event_id=event_id
    )
    if event is not None:
        yield event
position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + event_id: Optional[str] = None + for chunk in response.iter_bytes(): + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + if discard: + response.close() + return + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def _parse_event( + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim == 0: + continue + + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == 
"id": + publish = True + if "\x00" not in value: + event_id = value + elif field == "retry": + if value.isdigit(): + event.retry = int(value) + publish = True + + event.id = event_id + + if sentinel and data == f"{sentinel}\n": + return None, True, event_id + + if data: + data = data[:-1] + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data + + out = None + if publish: + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py new file mode 100644 index 00000000..6facec53 --- /dev/null +++ b/src/mistralai/client/utils/forms.py @@ -0,0 +1,235 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
def _populate_form(
    field_name: str,
    explode: bool,
    obj: Any,
    delimiter: str,
    form: Dict[str, List[str]],
):
    """Serialize `obj` into `form` under `field_name` using form-style encoding.

    `explode` emits one entry per field; otherwise values are joined with
    `delimiter` into a single entry. Returns `form` for convenience.
    """
    if not _is_set(obj):
        return form

    if isinstance(obj, BaseModel):
        pairs = []
        model_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for attr_name, model_field in model_fields.items():
            alias = model_field.alias if model_field.alias is not None else attr_name
            if alias == "":
                continue

            attr_val = getattr(obj, attr_name)
            if not _is_set(attr_val):
                continue

            if explode:
                form[alias] = [_val_to_string(attr_val)]
            else:
                pairs.append(f"{alias}{delimiter}{_val_to_string(attr_val)}")

        if pairs:
            form[field_name] = [delimiter.join(pairs)]
    elif isinstance(obj, Dict):
        pairs = []
        for key, value in obj.items():
            if not _is_set(value):
                continue

            if explode:
                form[key] = [_val_to_string(value)]
            else:
                pairs.append(f"{key}{delimiter}{_val_to_string(value)}")

        if pairs:
            form[field_name] = [delimiter.join(pairs)]
    elif isinstance(obj, List):
        pairs = []
        for value in obj:
            if not _is_set(value):
                continue

            if explode:
                form.setdefault(field_name, []).append(_val_to_string(value))
            else:
                pairs.append(_val_to_string(value))

        if pairs:
            form[field_name] = [delimiter.join(str(p) for p in pairs)]
    else:
        form[field_name] = [_val_to_string(obj)]

    return form


def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]:
    """Extract (file name, content, content type) from a multipart file model.

    Raises ValueError when the model is missing a name or content.
    """
    file_name = ""
    content = None
    content_type = None

    file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields
    for fname, finfo in file_fields.items():
        file_metadata = find_field_metadata(finfo, MultipartFormMetadata)
        if file_metadata is None:
            continue

        if file_metadata.content:
            content = getattr(file_obj, fname, None)
        elif fname == "content_type":
            content_type = getattr(file_obj, fname, None)
        else:
            # Any remaining multipart-annotated field carries the file name.
            file_name = getattr(file_obj, fname)

    if file_name == "" or content is None:
        raise ValueError("invalid multipart/form-data file")

    return file_name, content, content_type


def serialize_multipart_form(
    media_type: str, request: Any
) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]:
    """Split a request model into (media_type, form fields, file tuples).

    Raises TypeError when `request` is not a pydantic model.
    """
    if not isinstance(request, BaseModel):
        raise TypeError("invalid request body type")

    form: Dict[str, Any] = {}
    files: List[Tuple[str, Any]] = []

    request_fields: Dict[str, FieldInfo] = request.__class__.model_fields
    request_field_types = get_type_hints(request.__class__)

    for attr_name, finfo in request_fields.items():
        val = getattr(request, attr_name)
        if not _is_set(val):
            continue

        field_metadata = find_field_metadata(finfo, MultipartFormMetadata)
        if not field_metadata:
            continue

        f_name = finfo.alias if finfo.alias else attr_name

        if field_metadata.file:
            # Single files and arrays of files share the same per-file path.
            file_objs = val if isinstance(val, List) else [val]
            for file_obj in file_objs:
                if not _is_set(file_obj):
                    continue

                file_name, content, content_type = _extract_file_properties(file_obj)
                if content_type is not None:
                    files.append((f_name, (file_name, content, content_type)))
                else:
                    files.append((f_name, (file_name, content)))
        elif field_metadata.json:
            # JSON-typed parts are sent as unnamed application/json files.
            files.append(
                (
                    f_name,
                    (
                        None,
                        marshal_json(val, request_field_types[attr_name]),
                        "application/json",
                    ),
                )
            )
        else:
            if isinstance(val, List):
                form[f_name] = [_val_to_string(v) for v in val if _is_set(v)]
            else:
                form[f_name] = _val_to_string(val)

    return media_type, form, files
files.append((f_name, (file_name, content))) + elif field_metadata.json: + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) + else: + if isinstance(val, List): + values = [] + + for value in val: + if not _is_set(value): + continue + values.append(_val_to_string(value)) + + array_field_name = f_name + form[array_field_name] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if not _is_set(val): + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + if _is_set(value): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/src/mistralai/client/utils/headers.py b/src/mistralai/client/utils/headers.py new file mode 100644 index 00000000..64911872 --- /dev/null +++ b/src/mistralai/client/utils/headers.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]:
    """Build the outgoing header map from *headers_params*, then *gbls*.

    Fields whose value came from the globals are recorded so the second
    pass over *gbls* does not serialize them twice.
    """
    headers: Dict[str, str] = {}

    consumed_globals: List[str] = []
    if _is_set(headers_params):
        consumed_globals = _populate_headers(headers_params, gbls, headers, [])
    if _is_set(gbls):
        _populate_headers(gbls, None, headers, consumed_globals)

    return headers


def _populate_headers(
    headers_params: Any,
    gbls: Any,
    header_values: Dict[str, str],
    skip_fields: List[str],
) -> List[str]:
    """Serialize header-annotated fields of *headers_params* into *header_values*.

    Returns the names of fields whose values were taken from *gbls*.
    """
    populated_from_globals: List[str] = []

    if not isinstance(headers_params, BaseModel):
        return populated_from_globals

    fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields
    for name, field in fields.items():
        if name in skip_fields:
            continue

        f_name = field.alias if field.alias is not None else name

        metadata = find_field_metadata(field, HeaderMetadata)
        if metadata is None:
            continue

        value, global_found = _populate_from_globals(
            name, getattr(headers_params, name), HeaderMetadata, gbls
        )
        if global_found:
            populated_from_globals.append(name)

        serialized = _serialize_header(metadata.explode, value)
        if serialized != "":
            header_values[f_name] = serialized

    return populated_from_globals


def _serialize_header(explode: bool, obj: Any) -> str:
    """Render *obj* as a simple-style header string.

    With ``explode`` model/dict entries become ``key=value`` pairs;
    otherwise keys and values alternate in the comma-joined output.
    Returns "" when there is nothing to serialize.
    """
    if not _is_set(obj):
        return ""

    if isinstance(obj, BaseModel):
        parts = []
        fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for name, obj_field in fields.items():
            if not find_field_metadata(obj_field, HeaderMetadata):
                continue

            f_name = obj_field.alias if obj_field.alias is not None else name

            val = getattr(obj, name)
            if not _is_set(val):
                continue

            if explode:
                parts.append(f"{f_name}={_val_to_string(val)}")
            else:
                parts.extend((f_name, _val_to_string(val)))

        if parts:
            return ",".join(parts)
    elif isinstance(obj, Dict):
        parts = []
        for key, value in obj.items():
            if not _is_set(value):
                continue

            if explode:
                parts.append(f"{key}={_val_to_string(value)}")
            else:
                parts.extend((key, _val_to_string(value)))

        if parts:
            return ",".join(str(p) for p in parts)
    elif isinstance(obj, List):
        parts = [_val_to_string(v) for v in obj if _is_set(v)]
        if parts:
            return ",".join(parts)
    elif _is_set(obj):
        return f"{_val_to_string(obj)}"

    return ""


def get_response_headers(headers: Headers) -> Dict[str, List[str]]:
    """Group response header values into lists keyed by header name."""
    res: Dict[str, List[str]] = {}
    for k, v in headers.items():
        res.setdefault(k, []).append(v)
    return res
class Logger(Protocol):
    """Structural type for loggers the SDK can emit debug output to."""

    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass


class NoOpLogger:
    """Default logger implementation: drops every message."""

    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass


def get_body_content(req: httpx.Request) -> str:
    """Return the request body if it has been materialized, else ""."""
    # Accessing .content on a request whose body was never loaded raises,
    # so probe the private _content attribute first.
    return str(req.content) if hasattr(req, "_content") else ""


def get_default_logger() -> Logger:
    """Return a DEBUG-level logger when MISTRAL_DEBUG is set, else a no-op."""
    if os.getenv("MISTRAL_DEBUG"):
        logging.basicConfig(level=logging.DEBUG)
        return logging.getLogger("mistralai.client")
    return NoOpLogger()


T = TypeVar("T")


@dataclass
class SecurityMetadata:
    """Marks a field as participating in a security scheme."""

    option: bool = False
    scheme: bool = False
    scheme_type: Optional[str] = None
    sub_type: Optional[str] = None
    field_name: Optional[str] = None

    def get_field_name(self, default: str) -> str:
        """Return the wire-level field name, falling back to *default*."""
        return self.field_name if self.field_name else default


@dataclass
class ParamMetadata:
    """Base serialization settings shared by all parameter kinds."""

    serialization: Optional[str] = None
    style: str = "simple"
    explode: bool = False


@dataclass
class PathParamMetadata(ParamMetadata):
    """Path parameter: simple style, not exploded."""


@dataclass
class QueryParamMetadata(ParamMetadata):
    """Query parameter: form style, exploded by default."""

    style: str = "form"
    explode: bool = True


@dataclass
class HeaderMetadata(ParamMetadata):
    """Header parameter: simple style, not exploded."""


@dataclass
class RequestMetadata:
    """Marks the request-body field and records its media type."""

    media_type: str = "application/octet-stream"


@dataclass
class MultipartFormMetadata:
    """Marks a multipart/form-data part (file, raw content, or JSON)."""

    file: bool = False
    content: bool = False
    json: bool = False
FormMetadata: + json: bool = False + style: str = "form" + explode: bool = True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/src/mistralai/client/utils/queryparams.py b/src/mistralai/client/utils/queryparams.py new file mode 
def get_query_params(
    query_params: Any,
    gbls: Optional[Any] = None,
    allow_empty_value: Optional[List[str]] = None,
) -> Dict[str, List[str]]:
    """Serialize *query_params* (a pydantic model) into a query-string map.

    Globals in *gbls* fill in fields that were not set explicitly;
    *allow_empty_value* lists parameter names that must be emitted even
    when their value is empty or unset.
    """
    params: Dict[str, List[str]] = {}

    globals_already_populated = _populate_query_params(
        query_params, gbls, params, [], allow_empty_value
    )
    if _is_set(gbls):
        # Second pass over the globals, skipping fields already consumed.
        _populate_query_params(
            gbls, None, params, globals_already_populated, allow_empty_value
        )

    return params


def _populate_query_params(
    query_params: Any,
    gbls: Any,
    query_param_values: Dict[str, List[str]],
    skip_fields: List[str],
    allow_empty_value: Optional[List[str]] = None,
) -> List[str]:
    """Populate *query_param_values* from the fields of *query_params*.

    Returns the names of fields whose values were taken from *gbls* so a
    later pass over the globals can skip them.

    Raises:
        NotImplementedError: for an unsupported query-parameter style.
    """
    globals_already_populated: List[str] = []

    if not isinstance(query_params, BaseModel):
        return globals_already_populated

    # Hoisted out of the loop: the set of params allowed to be empty.
    allow_empty_set = set(allow_empty_value or [])

    param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields
    param_field_types = get_type_hints(query_params.__class__)
    for name in param_fields:
        if name in skip_fields:
            continue

        field = param_fields[name]

        metadata = find_field_metadata(field, QueryParamMetadata)
        if not metadata:
            continue

        value = getattr(query_params, name) if _is_set(query_params) else None

        value, global_found = _populate_from_globals(
            name, value, QueryParamMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)

        f_name = field.alias if field.alias is not None else name

        should_include_empty = f_name in allow_empty_set and (
            value is None or value == [] or value == ""
        )
        if should_include_empty:
            query_param_values[f_name] = [""]
            continue

        if metadata.serialization is not None:
            serialized_params = _get_serialized_params(
                metadata, f_name, value, param_field_types[name]
            )
            for s_key, s_value in serialized_params.items():
                # Fix: append the serialized string as one element.  The
                # previous extend() spliced it in character by character
                # on key collision (and the loop variable shadowed
                # `value` above).
                if s_key in query_param_values:
                    query_param_values[s_key].append(s_value)
                else:
                    query_param_values[s_key] = [s_value]
        else:
            style = metadata.style
            if style == "deepObject":
                _populate_deep_object_query_params(f_name, value, query_param_values)
            elif style == "form":
                _populate_delimited_query_params(
                    metadata, f_name, value, ",", query_param_values
                )
            elif style == "pipeDelimited":
                _populate_delimited_query_params(
                    metadata, f_name, value, "|", query_param_values
                )
            else:
                raise NotImplementedError(
                    f"query param style {style} not yet supported"
                )

    return globals_already_populated


def _populate_deep_object_query_params(
    field_name: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Serialize *obj* using deepObject style: ``parent[child]=value``."""
    if not _is_set(obj):
        return

    if isinstance(obj, BaseModel):
        _populate_deep_object_query_params_basemodel(field_name, obj, params)
    elif isinstance(obj, Dict):
        _populate_deep_object_query_params_dict(field_name, obj, params)


def _populate_deep_object_query_params_basemodel(
    prior_params_key: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Recurse into a pydantic model, emitting ``key[field]`` entries."""
    if not _is_set(obj) or not isinstance(obj, BaseModel):
        return

    obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
    for name in obj_fields:
        obj_field = obj_fields[name]

        f_name = obj_field.alias if obj_field.alias is not None else name

        params_key = f"{prior_params_key}[{f_name}]"

        # Only fields annotated as query params are serialized.
        obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata)
        if not _is_set(obj_param_metadata):
            continue

        obj_val = getattr(obj, name)
        if not _is_set(obj_val):
            continue

        if isinstance(obj_val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, obj_val, params)
        elif isinstance(obj_val, Dict):
            _populate_deep_object_query_params_dict(params_key, obj_val, params)
        elif isinstance(obj_val, List):
            _populate_deep_object_query_params_list(params_key, obj_val, params)
        else:
            params[params_key] = [_val_to_string(obj_val)]


def _populate_deep_object_query_params_dict(
    prior_params_key: str,
    value: Dict,
    params: Dict[str, List[str]],
):
    """Recurse into a plain dict, emitting ``key[subkey]`` entries."""
    if not _is_set(value):
        return

    for key, val in value.items():
        if not _is_set(val):
            continue

        params_key = f"{prior_params_key}[{key}]"

        if isinstance(val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, val, params)
        elif isinstance(val, Dict):
            _populate_deep_object_query_params_dict(params_key, val, params)
        elif isinstance(val, List):
            _populate_deep_object_query_params_list(params_key, val, params)
        else:
            params[params_key] = [_val_to_string(val)]


def _populate_deep_object_query_params_list(
    params_key: str,
    value: List,
    params: Dict[str, List[str]],
):
    """Append each list element as a repeated *params_key* entry."""
    if not _is_set(value):
        return

    for val in value:
        if not _is_set(val):
            continue

        params.setdefault(params_key, []).append(_val_to_string(val))


def _populate_delimited_query_params(
    metadata: QueryParamMetadata,
    field_name: str,
    obj: Any,
    delimiter: str,
    query_param_values: Dict[str, List[str]],
):
    """Serialize via the shared form serializer with the given delimiter."""
    _populate_form(
        field_name,
        metadata.explode,
        obj,
        delimiter,
        query_param_values,
    )
# Maps the generator's serialization-method token to its content type.
SERIALIZATION_METHOD_TO_CONTENT_TYPE = {
    "json": "application/json",
    "form": "application/x-www-form-urlencoded",
    "multipart": "multipart/form-data",
    "raw": "application/octet-stream",
    "string": "text/plain",
}


@dataclass
class SerializedRequestBody:
    """Holds a request body in whichever form httpx expects it."""

    media_type: Optional[str] = None
    content: Optional[Any] = None
    data: Optional[Any] = None
    files: Optional[Any] = None


def serialize_request_body(
    request_body: Any,
    nullable: bool,
    optional: bool,
    serialization_method: str,
    request_body_type,
) -> Optional[SerializedRequestBody]:
    """Serialize *request_body* according to *serialization_method*.

    Returns None when the body is absent and the schema marks it as
    optional (and not nullable); otherwise returns a populated
    SerializedRequestBody.  Raises TypeError for unserializable bodies.
    """
    if request_body is None and not nullable and optional:
        return None

    media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method]

    result = SerializedRequestBody(media_type)

    if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type):
        result.content = marshal_json(request_body, request_body_type)
    elif re.match(r"^multipart\/.*", media_type):
        result.media_type, result.data, result.files = serialize_multipart_form(
            media_type, request_body
        )
    elif re.match(r"^application\/x-www-form-urlencoded.*", media_type):
        result.data = serialize_form_data(request_body)
    elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)):
        result.content = request_body
    elif isinstance(request_body, str):
        result.content = request_body
    else:
        raise TypeError(
            f"invalid request body type {type(request_body)} for mediaType {media_type}"
        )

    return result
a/src/mistralai/client/utils/retries.py b/src/mistralai/client/utils/retries.py new file mode 100644 index 00000000..bea13041 --- /dev/null +++ b/src/mistralai/client/utils/retries.py @@ -0,0 +1,272 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f1a5b90423c + +import asyncio +import random +import time +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + retry_after: Optional[int] + + def __init__(self, response: httpx.Response): + self.response = response + self.retry_after = _parse_retry_after_header(response) + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. 
+ """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. + """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise 
PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + time.sleep(sleep) + retries += 1 + + +async def 
retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py new file mode 100644 index 00000000..d8b9d8fe --- /dev/null +++ b/src/mistralai/client/utils/security.py @@ -0,0 +1,195 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1acb7c006265 + +import base64 + +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) +import os + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + 
# Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: + if security is not None: + return security + + if not issubclass(security_class, BaseModel): + raise TypeError("security_class must be a pydantic model class") + + security_dict: Any = {} + + if os.getenv("MISTRAL_API_KEY"): + security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") + + return security_class(**security_dict) if security_dict else None + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if 
metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value + elif sub_type == "custom": + return + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or 
metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/client/utils/serializers.py b/src/mistralai/client/utils/serializers.py new file mode 100644 index 00000000..fbc2772d --- /dev/null +++ b/src/mistralai/client/utils/serializers.py @@ -0,0 +1,230 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 53c57c7f29a8 + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if 
as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or 
type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. + """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. 
+ Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/src/mistralai/client/utils/unions.py b/src/mistralai/client/utils/unions.py new file mode 100644 index 00000000..14ef1bd5 --- /dev/null +++ b/src/mistralai/client/utils/unions.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d23713342634 + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. 
+ """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..624433c4 --- /dev/null +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b13585fc5626 + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.client import errors + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise errors.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/src/mistralai/client/utils/url.py b/src/mistralai/client/utils/url.py new file mode 100644 index 00000000..27a6a3a0 --- /dev/null +++ b/src/mistralai/client/utils/url.py @@ -0,0 +1,156 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3c6496c17510 + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if _is_set(gbls): + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if _is_set(path_params) else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if not _is_set(param): + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + 
serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if not _is_set(pp_val): + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if not _is_set(param[pp_key]): + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if not _is_set(param_field_val): + continue + if param_metadata.explode: + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + elif _is_set(param): + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and 
input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/src/mistralai/client/utils/values.py b/src/mistralai/client/utils/values.py new file mode 100644 index 00000000..2469a9f3 --- /dev/null +++ b/src/mistralai/client/utils/values.py @@ -0,0 +1,138 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bb6ade7a7f82 + +from datetime import datetime +from enum import Enum +from email.message import Message +from functools import partial +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from ..types.basemodel import Unset + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + +def cast_partial(typ): + return partial(cast, typ) + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def 
match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params + + +def _is_set(value: Any) -> bool: + return value is not None and not isinstance(value, Unset) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py deleted file mode 100644 index 9d82a535..00000000 --- a/src/mistralai/client_base.py +++ /dev/null @@ -1,73 +0,0 @@ -import logging -from abc import ABC -from typing import Any, Dict, List, Optional - -from mistralai.exceptions import MistralAPIException, 
MistralException -from mistralai.models.chat_completion import ChatMessage - - -class ClientBase(ABC): - def __init__( - self, - endpoint: str, - api_key: Optional[str] = None, - max_retries: int = 5, - timeout: int = 120, - ): - self._max_retries = max_retries - self._timeout = timeout - - self._endpoint = endpoint - self._api_key = api_key - self._logger = logging.getLogger(__name__) - - @staticmethod - def _make_chat_request( - model: str, - messages: List[ChatMessage], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stream: Optional[bool] = None, - safe_mode: Optional[bool] = False, - ) -> Dict[str, Any]: - request_data: Dict[str, Any] = { - "model": model, - "messages": [msg.model_dump() for msg in messages], - "safe_prompt": safe_mode, - } - if temperature is not None: - request_data["temperature"] = temperature - if max_tokens is not None: - request_data["max_tokens"] = max_tokens - if top_p is not None: - request_data["top_p"] = top_p - if random_seed is not None: - request_data["random_seed"] = random_seed - if stream is not None: - request_data["stream"] = stream - - return request_data - - def _check_response( - self, json_response: Dict[str, Any], headers: Dict[str, Any], status: int - ) -> None: - if "object" not in json_response: - raise MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException( - message=json_response["message"], - http_status=status, - headers=headers, - ) - if 400 <= status < 500: - raise MistralAPIException( - message=f"Unexpected client error (status {status}): {json_response}", - http_status=status, - headers=headers, - ) - if status >= 500: - raise MistralException( - message=f"Unexpected server error (status {status}): {json_response}" - ) diff --git a/src/mistralai/constants.py b/src/mistralai/constants.py deleted file mode 100644 index 
b274a4c0..00000000 --- a/src/mistralai/constants.py +++ /dev/null @@ -1,5 +0,0 @@ - - -RETRY_STATUS_CODES = {429, 500, 502, 503, 504} - -ENDPOINT = "https://api.mistral.ai" diff --git a/src/mistralai/exceptions.py b/src/mistralai/exceptions.py deleted file mode 100644 index 6cfc4ce6..00000000 --- a/src/mistralai/exceptions.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import annotations - -from typing import Any, Dict, Optional - -import aiohttp -from requests import Response - - -class MistralException(Exception): - """Base Exception class, returned when nothing more specific applies""" - - def __init__(self, message: Optional[str] = None) -> None: - super(MistralException, self).__init__(message) - - self.message = message - - def __str__(self) -> str: - msg = self.message or "" - return msg - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(message={str(self)})" - - -class MistralAPIException(MistralException): - """Returned when the API responds with an error message""" - - def __init__( - self, - message: Optional[str] = None, - http_status: Optional[int] = None, - headers: Optional[Dict[str, Any]] = None, - ) -> None: - super().__init__(message) - self.http_status = http_status - self.headers = headers or {} - - @classmethod - def from_response( - cls, response: Response, message: Optional[str] = None - ) -> MistralAPIException: - return cls( - message=message or response.text, - http_status=response.status_code, - headers=dict(response.headers), - ) - - @classmethod - def from_aio_response( - cls, response: aiohttp.ClientResponse, message: Optional[str] = None - ) -> MistralAPIException: - return cls( - message=message, - http_status=response.status, - headers=dict(response.headers), - ) - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(message={str(self)}, http_status={self.http_status})" - - -class MistralConnectionException(MistralException): - """Returned when the SDK can 
not reach the API server for any reason""" diff --git a/src/mistralai/extra/README.md b/src/mistralai/extra/README.md new file mode 100644 index 00000000..0593d84a --- /dev/null +++ b/src/mistralai/extra/README.md @@ -0,0 +1,56 @@ +## Context + +The extra package contains the custom logic which is too complex to be generated by Speakeasy from the OpenAPI specs. It was introduced to add the Structured Outputs feature. + +## Development / Contributing + +To add custom code in the SDK, you need to use [Speakeasy custom code regions](https://www.speakeasy.com/docs/customize/code/code-regions/overview) as below. + +### Runbook of SDK customization + +1. Add the code you want to import in the `src/mistralai/extra/` package. To have it importable from the SDK, you need to add it in the `__init__.py` file: +```python +from .my_custom_file import my_custom_function + +__all__ = ["my_custom_function"] +``` + +2. Add a new custom code region in the SDK files, e.g in `src/mistralai/chat.py`: +```python +# region imports +from typing import Type +from mistralai.extra import my_custom_function +# endregion imports + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + # region sdk-class-body + def my_custom_method(self, param: str) -> Type[some_type]: + output = my_custom_function(param1) + return output + # endregion sdk-class-body +``` + +3. Now build the SDK with the custom code: +```bash +rm -rf dist; uv build; uv pip install --reinstall ~/client-python/dist/mistralai-1.4.1-py3-none-any.whl +``` + +4. 
And now you should be able to call the custom method: +```python +import os +from mistralai import Mistral + +api_key = os.environ["MISTRAL_API_KEY"] +client = Mistral(api_key=api_key) + +client.chat.my_custom_method(param="test") +``` + +### Run the unit tests + +To run the unit tests for the `extra` package, you can run the following command from the root of the repository: +```bash +python3.12 -m unittest discover -s src/mistralai/extra/tests -t src +``` diff --git a/src/mistralai/extra/__init__.py b/src/mistralai/extra/__init__.py new file mode 100644 index 00000000..cabda728 --- /dev/null +++ b/src/mistralai/extra/__init__.py @@ -0,0 +1,61 @@ +from typing import TYPE_CHECKING + +from .struct_chat import ( + ParsedChatCompletionResponse, + convert_to_parsed_chat_completion_response, +) +from .utils import response_format_from_pydantic_model +from .utils.response_format import CustomPydanticModel + +if TYPE_CHECKING: + from .realtime import ( + AudioEncoding, + AudioFormat, + RealtimeConnection, + RealtimeTranscriptionError, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + RealtimeTranscription, + UnknownRealtimeEvent, + ) + +_REALTIME_EXPORTS = { + "RealtimeTranscription", + "RealtimeConnection", + "AudioEncoding", + "AudioFormat", + "UnknownRealtimeEvent", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", +} + + +def __getattr__(name: str): + if name in _REALTIME_EXPORTS: + from . 
import realtime + + return getattr(realtime, name) + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +__all__ = [ + "convert_to_parsed_chat_completion_response", + "response_format_from_pydantic_model", + "CustomPydanticModel", + "ParsedChatCompletionResponse", + "RealtimeTranscription", + "RealtimeConnection", + "AudioEncoding", + "AudioFormat", + "UnknownRealtimeEvent", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", +] diff --git a/src/mistralai/extra/exceptions.py b/src/mistralai/extra/exceptions.py new file mode 100644 index 00000000..d2cd3e79 --- /dev/null +++ b/src/mistralai/extra/exceptions.py @@ -0,0 +1,59 @@ +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.client.models import RealtimeTranscriptionError + + +class MistralClientException(Exception): + """Base exception for client errors.""" + + +class RunException(MistralClientException): + """Conversation run errors.""" + + +class MCPException(MistralClientException): + """MCP operation errors.""" + + +class MCPAuthException(MCPException): + """MCP authentication errors.""" + + +class RealtimeTranscriptionException(MistralClientException): + """Base realtime transcription exception.""" + + def __init__( + self, + message: str, + *, + code: Optional[int] = None, + payload: Optional[object] = None, + ) -> None: + super().__init__(message) + self.code = code + self.payload = payload + + +class RealtimeTranscriptionWSError(RealtimeTranscriptionException): + def __init__( + self, + message: str, + *, + payload: Optional["RealtimeTranscriptionError"] = None, + raw: Optional[object] = None, + ) -> None: + code: Optional[int] = None + if payload is not None: + try: + maybe_code = getattr(payload.error, "code", None) + if isinstance(maybe_code, int): + code = maybe_code + except Exception: + code = None + + 
super().__init__( + message, code=code, payload=payload if payload is not None else raw + ) + self.payload_typed = payload + self.payload_raw = raw diff --git a/src/mistralai/__init__.py b/src/mistralai/extra/mcp/__init__.py similarity index 100% rename from src/mistralai/__init__.py rename to src/mistralai/extra/mcp/__init__.py diff --git a/src/mistralai/extra/mcp/auth.py b/src/mistralai/extra/mcp/auth.py new file mode 100644 index 00000000..8a61ddab --- /dev/null +++ b/src/mistralai/extra/mcp/auth.py @@ -0,0 +1,165 @@ +import logging + +import httpx +from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase +from authlib.oauth2.rfc8414 import AuthorizationServerMetadata + +from mistralai.client.types import BaseModel + +logger = logging.getLogger(__name__) + + +class Oauth2AuthorizationScheme(BaseModel): + """Information about the oauth flow to perform with the authorization server.""" + + authorization_url: str + token_url: str + scope: list[str] + description: str | None = None + refresh_url: str | None = None + + +class OAuthParams(BaseModel): + """Required params for authorization.""" + + scheme: Oauth2AuthorizationScheme + client_id: str + client_secret: str + + +class AsyncOAuth2Client(AsyncOAuth2ClientBase): + """Subclass of the Async httpx oauth client which provides a constructor from OAuthParams.""" + + @classmethod + def from_oauth_params(cls, oauth_params: OAuthParams) -> "AsyncOAuth2Client": + return cls( + client_id=oauth_params.client_id, + client_secret=oauth_params.client_secret, + scope=oauth_params.scheme.scope, + ) + + +async def get_well_known_authorization_server_metadata( + server_url: str, +) -> AuthorizationServerMetadata | None: + """Fetch the metadata from the well-known location. 
+ + This should be available on MCP servers as described by the specification: + https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#2-3-server-metadata-discovery. + """ + well_known_url = f"{server_url}/.well-known/oauth-authorization-server" + response = await httpx.AsyncClient().get(well_known_url) + if 200 <= response.status_code < 300: + try: + server_metadata = AuthorizationServerMetadata(**response.json()) + server_metadata.validate() + return server_metadata + except ValueError: + logger.exception("Failed to parse oauth well-known metadata") + return None + else: + logger.error(f"Failed to get oauth well-known metadata from {server_url}") + return None + + +async def get_oauth_server_metadata(server_url: str) -> AuthorizationServerMetadata: + """Fetch the metadata from the authorization server to perform the oauth flow.""" + # 1) attempt to get the metadata from the resource server at /.well-known/oauth-protected-resource + # TODO: new self-discovery protocol, not released yet + + # 2) attempt to get the metadata from the authorization server at /.well-known/oauth-authorization-server + metadata = await get_well_known_authorization_server_metadata(server_url=server_url) + if metadata is not None: + return metadata + + # 3) fallback on default endpoints + # https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#2-3-3-fallbacks-for-servers-without-metadata-discovery + return AuthorizationServerMetadata( + issuer=server_url, + authorization_endpoint=f"{server_url}/authorize", + token_endpoint=f"{server_url}/token", + register_endpoint=f"{server_url}/register", + response_types_supported=["code"], + response_modes_supported=["query"], + grant_types_supported=["authorization_code", "refresh_token"], + token_endpoint_auth_methods_supported=["client_secret_basic"], + code_challenge_methods_supported=["S256", "plain"], + ) + + +async 
def dynamic_client_registration( + register_endpoint: str, + redirect_url: str, + async_client: httpx.AsyncClient, +) -> tuple[str, str]: + """Try to register the client dynamically with an MCP server. + + Returns a client_id and client_secret. + """ + # Construct the registration request payload + registration_payload = { + "client_name": "MistralSDKClient", + "grant_types": ["authorization_code", "refresh_token"], + "token_endpoint_auth_method": "client_secret_basic", + "response_types": ["code"], + "redirect_uris": [redirect_url], + } + + # Make the registration request + response = await async_client.post(register_endpoint, json=registration_payload) + try: + response.raise_for_status() + registration_info = response.json() + client_id = registration_info["client_id"] + client_secret = registration_info["client_secret"] + except Exception as e: + raise ValueError( + f"Client registration failed: status={response.status_code}, error={response.text}" + ) from e + return client_id, client_secret + + +async def build_oauth_params( + server_url: str, + redirect_url: str, + client_id: str | None = None, + client_secret: str | None = None, + scope: list[str] | None = None, + async_client: httpx.AsyncClient | None = None, +) -> OAuthParams: + """Get issuer metadata and build the oauth required params.""" + metadata = await get_oauth_server_metadata(server_url=server_url) + oauth_scheme = Oauth2AuthorizationScheme( + authorization_url=metadata.authorization_endpoint, + token_url=metadata.token_endpoint, + scope=scope or [], + refresh_url=metadata.token_endpoint + if "refresh_token" in metadata.grant_types_supported + else None, + ) + if client_id and client_secret: + return OAuthParams( + client_id=client_id, + client_secret=client_secret, + scheme=oauth_scheme, + ) + + # Try to dynamically register the client + if async_client: + reg_client_id, reg_client_secret = await dynamic_client_registration( + register_endpoint=metadata.registration_endpoint, + 
redirect_url=redirect_url, + async_client=async_client, + ) + else: + async with httpx.AsyncClient() as async_client: + reg_client_id, reg_client_secret = await dynamic_client_registration( + register_endpoint=metadata.registration_endpoint, + redirect_url=redirect_url, + async_client=async_client, + ) + return OAuthParams( + client_id=reg_client_id, + client_secret=reg_client_secret, + scheme=oauth_scheme, + ) diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py new file mode 100644 index 00000000..115eff61 --- /dev/null +++ b/src/mistralai/extra/mcp/base.py @@ -0,0 +1,155 @@ +import logging +import typing +from collections.abc import Sequence +from contextlib import AsyncExitStack +from typing import Any, Protocol + +from mcp import ClientSession # pyright: ignore[reportMissingImports] +from mcp.types import ( # pyright: ignore[reportMissingImports] + ContentBlock, + ListPromptsResult, +) + +from mistralai.extra.exceptions import MCPException +from mistralai.client.models import ( + FunctionTool, + Function, + SystemMessageTypedDict, + AssistantMessageTypedDict, + TextChunkTypedDict, +) + +logger = logging.getLogger(__name__) + + +class MCPSystemPrompt(typing.TypedDict): + description: str | None + messages: list[SystemMessageTypedDict | AssistantMessageTypedDict] + + +class MCPClientProtocol(Protocol): + """MCP client that converts MCP artifacts to Mistral format.""" + + _name: str + + async def initialize(self, exit_stack: AsyncExitStack | None) -> None: + ... + + async def aclose(self) -> None: + ... + + async def get_tools(self) -> list[FunctionTool]: + ... + + async def execute_tool( + self, name: str, arguments: dict[str, Any] + ) -> list[TextChunkTypedDict]: + ... + + async def get_system_prompt( + self, name: str, arguments: dict[str, Any] + ) -> MCPSystemPrompt: + ... + + async def list_system_prompts(self) -> ListPromptsResult: + ... 
+ + +class MCPClientBase(MCPClientProtocol): + """Base class to implement functionalities from an initialized MCP session.""" + + _session: ClientSession + + def __init__(self, name: str | None = None): + self._name = name or self.__class__.__name__ + self._exit_stack: AsyncExitStack | None = None + self._is_initialized = False + + def _convert_content(self, mcp_content: ContentBlock) -> TextChunkTypedDict: + if not mcp_content.type == "text": + raise MCPException("Only supporting text tool responses for now.") + return {"type": "text", "text": mcp_content.text} + + def _convert_content_list( + self, mcp_contents: Sequence[ContentBlock] + ) -> list[TextChunkTypedDict]: + content_chunks = [] + for mcp_content in mcp_contents: + content_chunks.append(self._convert_content(mcp_content)) + return content_chunks + + async def get_tools(self) -> list[FunctionTool]: + mcp_tools = await self._session.list_tools() + tools = [] + for mcp_tool in mcp_tools.tools: + tools.append( + FunctionTool( + function=Function( + name=mcp_tool.name, + description=mcp_tool.description, + parameters=mcp_tool.inputSchema, + strict=True, + ), + ) + ) + return tools + + async def execute_tool( + self, name: str, arguments: dict[str, Any] + ) -> list[TextChunkTypedDict]: + contents = await self._session.call_tool(name=name, arguments=arguments) + return self._convert_content_list(contents.content) + + async def get_system_prompt( + self, name: str, arguments: dict[str, Any] + ) -> MCPSystemPrompt: + prompt_result = await self._session.get_prompt(name=name, arguments=arguments) + return { + "description": prompt_result.description, + "messages": [ + typing.cast( + SystemMessageTypedDict | AssistantMessageTypedDict, + { + "role": message.role, + "content": self._convert_content(mcp_content=message.content), + }, + ) + for message in prompt_result.messages + ], + } + + async def list_system_prompts(self) -> ListPromptsResult: + return await self._session.list_prompts() + + async def 
initialize(self, exit_stack: AsyncExitStack | None = None) -> None: + """Initialize the MCP session.""" + # client is already initialized so return + if self._is_initialized: + return + if exit_stack is None: + self._exit_stack = AsyncExitStack() + exit_stack = self._exit_stack + stdio_transport = await self._get_transport(exit_stack=exit_stack) + mcp_session = await exit_stack.enter_async_context( + ClientSession( + read_stream=stdio_transport[0], + write_stream=stdio_transport[1], + ) + ) + await mcp_session.initialize() + self._session = mcp_session + self._is_initialized = True + + async def aclose(self): + """Close the MCP session.""" + if self._exit_stack: + await self._exit_stack.aclose() + + def __repr__(self): + return f"<{self.__class__.__name__} name={self._name!r} id=0x{id(self):x}>" + + def __str__(self): + return f"{self.__class__.__name__}(name={self._name})" + + async def _get_transport(self, exit_stack: AsyncExitStack): + raise NotImplementedError diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py new file mode 100644 index 00000000..b4929c54 --- /dev/null +++ b/src/mistralai/extra/mcp/sse.py @@ -0,0 +1,163 @@ +import http +import logging +from contextlib import AsyncExitStack +from functools import cached_property +from typing import Any + +import httpx +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from authlib.oauth2.rfc6749 import OAuth2Token +from mcp.client.sse import sse_client # pyright: ignore[reportMissingImports] +from mcp.shared.message import SessionMessage # pyright: ignore[reportMissingImports] + +from mistralai.extra.exceptions import MCPAuthException +from mistralai.extra.mcp.base import ( + MCPClientBase, +) +from mistralai.extra.mcp.auth import OAuthParams, AsyncOAuth2Client + +from mistralai.client.types import BaseModel + +logger = logging.getLogger(__name__) + + +class SSEServerParams(BaseModel): + """Parameters required for a MCPClient with SSE transport""" + + 
url: str + headers: dict[str, Any] | None = None + timeout: float = 5 + sse_read_timeout: float = 60 * 5 + + +class MCPClientSSE(MCPClientBase): + """MCP client that uses sse for communication. + + The client provides authentication for OAuth2 protocol following the current MCP authorization spec: + https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization. + + This is possibly going to change in the future since the protocol has ongoing discussions. + """ + + _oauth_params: OAuthParams | None + _sse_params: SSEServerParams + + def __init__( + self, + sse_params: SSEServerParams, + name: str | None = None, + oauth_params: OAuthParams | None = None, + auth_token: OAuth2Token | None = None, + ): + super().__init__(name=name) + self._sse_params = sse_params + self._oauth_params: OAuthParams | None = oauth_params + self._auth_token: OAuth2Token | None = auth_token + + @cached_property + def base_url(self) -> str: + return self._sse_params.url.rstrip("/sse") + + def set_oauth_params(self, oauth_params: OAuthParams): + """Update the oauth params and client accordingly.""" + if self._oauth_params is not None: + logger.warning(f"Overriding current oauth params for {self._name}") + self._oauth_params = oauth_params + + async def get_auth_url_and_state(self, redirect_url: str) -> tuple[str, str]: + """Create the authorization url for client to start oauth flow.""" + if self._oauth_params is None: + raise MCPAuthException( + "Can't generate an authorization url without oauth_params being set, " + "make sure the oauth params have been set." 
+ ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + auth_url, state = oauth_client.create_authorization_url( + self._oauth_params.scheme.authorization_url, redirect_uri=redirect_url + ) + return auth_url, state + + async def get_token_from_auth_response( + self, + authorization_response: str, + redirect_url: str, + state: str, + ) -> OAuth2Token: + """Fetch the authentication token from the server.""" + if self._oauth_params is None: + raise MCPAuthException( + "Can't fetch a token without oauth_params, make sure they have been set." + ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + oauth_token = await oauth_client.fetch_token( + url=self._oauth_params.scheme.token_url, + authorization_response=authorization_response, + redirect_uri=redirect_url, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + state=state, + ) + return oauth_token + + async def refresh_auth_token(self): + """Refresh an expired token.""" + if self._oauth_params is None or self._oauth_params.scheme.refresh_url is None: + raise MCPAuthException( + "Can't refresh a token without a refresh url make sure the oauth params have been set." + ) + if self._auth_token is None: + raise MCPAuthException( + "Can't refresh a token without a refresh token, use the `set_auth_token` to add a OAuth2Token." 
+ ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + oauth_token = await oauth_client.refresh_token( + url=self._oauth_params.scheme.refresh_url, + refresh_token=self._auth_token["refresh_token"], + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + self.set_auth_token(oauth_token) + + def set_auth_token(self, token: OAuth2Token) -> None: + """Register the authentication token with this client.""" + self._auth_token = token + + def _format_headers(self) -> dict[str, str]: + headers: dict[str, str] = {} + if self._sse_params.headers: + headers |= self._sse_params.headers + if self._auth_token: + headers["Authorization"] = f"Bearer {self._auth_token['access_token']}" + return headers + + async def requires_auth(self) -> bool: + """Check if the client requires authentication to communicate with the server.""" + response = httpx.get( + self._sse_params.url, + headers=self._format_headers(), + timeout=self._sse_params.timeout, + ) + return response.status_code == http.HTTPStatus.UNAUTHORIZED + + async def _get_transport( + self, exit_stack: AsyncExitStack + ) -> tuple[ + MemoryObjectReceiveStream[SessionMessage | Exception], + MemoryObjectSendStream[SessionMessage], + ]: + try: + return await exit_stack.enter_async_context( + sse_client( + url=self._sse_params.url, + headers=self._format_headers(), + timeout=self._sse_params.timeout, + sse_read_timeout=self._sse_params.sse_read_timeout, + ) + ) + except Exception as e: + if isinstance(e, httpx.HTTPStatusError): + if e.response.status_code == http.HTTPStatus.UNAUTHORIZED: + if self._oauth_params is None: + raise MCPAuthException( + "Authentication required but no auth params provided." 
+ ) from e + raise MCPAuthException("Authentication required.") from e + raise diff --git a/src/mistralai/extra/mcp/stdio.py b/src/mistralai/extra/mcp/stdio.py new file mode 100644 index 00000000..a548066c --- /dev/null +++ b/src/mistralai/extra/mcp/stdio.py @@ -0,0 +1,21 @@ +import logging +from contextlib import AsyncExitStack + +from mcp import StdioServerParameters, stdio_client # pyright: ignore[reportMissingImports] + +from mistralai.extra.mcp.base import MCPClientBase + +logger = logging.getLogger(__name__) + + +class MCPClientSTDIO(MCPClientBase): + """MCP client that uses stdio for communication.""" + + def __init__( + self, stdio_params: StdioServerParameters, name: str | None = None + ): + super().__init__(name=name) + self._stdio_params = stdio_params + + async def _get_transport(self, exit_stack: AsyncExitStack): + return await exit_stack.enter_async_context(stdio_client(self._stdio_params)) diff --git a/src/mistralai/extra/observability/__init__.py b/src/mistralai/extra/observability/__init__.py new file mode 100644 index 00000000..4ff5873c --- /dev/null +++ b/src/mistralai/extra/observability/__init__.py @@ -0,0 +1,15 @@ +from contextlib import contextmanager + +from opentelemetry import trace as otel_trace + +from .otel import MISTRAL_SDK_OTEL_TRACER_NAME + + +@contextmanager +def trace(name: str, **kwargs): + tracer = otel_trace.get_tracer(MISTRAL_SDK_OTEL_TRACER_NAME) + with tracer.start_as_current_span(name, **kwargs) as span: + yield span + + +__all__ = ["trace"] diff --git a/src/mistralai/extra/observability/otel.py b/src/mistralai/extra/observability/otel.py new file mode 100644 index 00000000..4a8808ce --- /dev/null +++ b/src/mistralai/extra/observability/otel.py @@ -0,0 +1,372 @@ +import copy +import json +import logging +import os +import traceback +from datetime import datetime, timezone +from enum import Enum + +import httpx +import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes +import 
opentelemetry.semconv._incubating.attributes.http_attributes as http_attributes +import opentelemetry.semconv.attributes.server_attributes as server_attributes +from opentelemetry import propagate, trace +from opentelemetry.sdk.trace import SpanProcessor +from opentelemetry.trace import Span, Status, StatusCode, Tracer, set_span_in_context + +logger = logging.getLogger(__name__) + + +OTEL_SERVICE_NAME: str = "mistralai_sdk" +MISTRAL_SDK_OTEL_TRACER_NAME: str = OTEL_SERVICE_NAME + "_tracer" + +MISTRAL_SDK_DEBUG_TRACING: bool = os.getenv("MISTRAL_SDK_DEBUG_TRACING", "false").lower() == "true" +DEBUG_HINT: str = "To see detailed tracing logs, set MISTRAL_SDK_DEBUG_TRACING=true." + + +class MistralAIAttributes: + MISTRAL_AI_TOTAL_TOKENS = "mistral_ai.request.total_tokens" + MISTRAL_AI_TOOL_CALL_ARGUMENTS = "mistral_ai.tool.call.arguments" + MISTRAL_AI_MESSAGE_ID = "mistral_ai.message.id" + MISTRAL_AI_OPERATION_NAME= "mistral_ai.operation.name" + MISTRAL_AI_OCR_USAGE_PAGES_PROCESSED = "mistral_ai.ocr.usage.pages_processed" + MISTRAL_AI_OCR_USAGE_DOC_SIZE_BYTES = "mistral_ai.ocr.usage.doc_size_bytes" + MISTRAL_AI_OPERATION_ID = "mistral_ai.operation.id" + MISTRAL_AI_ERROR_TYPE = "mistral_ai.error.type" + MISTRAL_AI_ERROR_MESSAGE = "mistral_ai.error.message" + MISTRAL_AI_ERROR_CODE = "mistral_ai.error.code" + MISTRAL_AI_FUNCTION_CALL_ARGUMENTS = "mistral_ai.function.call.arguments" + +class MistralAINameValues(Enum): + OCR = "ocr" + +class TracingErrors(Exception, Enum): + FAILED_TO_CREATE_SPAN_FOR_REQUEST = "Failed to create span for request." + FAILED_TO_ENRICH_SPAN_WITH_RESPONSE = "Failed to enrich span with response." + FAILED_TO_HANDLE_ERROR_IN_SPAN = "Failed to handle error in span." + FAILED_TO_END_SPAN = "Failed to end span." 
+ + def __str__(self): + return str(self.value) + +class GenAISpanEnum(str, Enum): + CONVERSATION = "conversation" + CONV_REQUEST = "POST /v1/conversations" + EXECUTE_TOOL = "execute_tool" + VALIDATE_RUN = "validate_run" + + @staticmethod + def function_call(func_name: str): + return f"function_call[{func_name}]" + + +def parse_time_to_nanos(ts: str) -> int: + dt = datetime.fromisoformat(ts.replace("Z", "+00:00")).astimezone(timezone.utc) + return int(dt.timestamp() * 1e9) + +def set_available_attributes(span: Span, attributes: dict) -> None: + for attribute, value in attributes.items(): + if value: + span.set_attribute(attribute, value) + + +def enrich_span_from_request(span: Span, request: httpx.Request) -> Span: + if not request.url.port: + # From httpx doc: + # Note that the URL class performs port normalization as per the WHATWG spec. + # Default ports for "http", "https", "ws", "wss", and "ftp" schemes are always treated as None. + # Handling default ports since most of the time we are using https + if request.url.scheme == "https": + port = 443 + elif request.url.scheme == "http": + port = 80 + else: + port = -1 + else: + port = request.url.port + + span.set_attributes({ + http_attributes.HTTP_REQUEST_METHOD: request.method, + http_attributes.HTTP_URL: str(request.url), + server_attributes.SERVER_ADDRESS: request.headers.get("host", ""), + server_attributes.SERVER_PORT: port + }) + if request._content: + request_body = json.loads(request._content) + + attributes = { + gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT: request_body.get("n", None), + gen_ai_attributes.GEN_AI_REQUEST_ENCODING_FORMATS: request_body.get("encoding_formats", None), + gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: request_body.get("frequency_penalty", None), + gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS: request_body.get("max_tokens", None), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: request_body.get("model", None), + gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY: 
request_body.get("presence_penalty", None), + gen_ai_attributes.GEN_AI_REQUEST_SEED: request_body.get("random_seed", None), + gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES: request_body.get("stop", None), + gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE: request_body.get("temperature", None), + gen_ai_attributes.GEN_AI_REQUEST_TOP_P: request_body.get("top_p", None), + gen_ai_attributes.GEN_AI_REQUEST_TOP_K: request_body.get("top_k", None), + # Input messages are likely to be large, containing user/PII data and other sensitive information. + # Also structured attributes are not yet supported on spans in Python. + # For those reasons, we will not record the input messages for now. + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: None, + } + # Set attributes only if they are not None. + # From OpenTelemetry documentation: None is not a valid attribute value per spec / is not a permitted value type for an attribute. + set_available_attributes(span, attributes) + return span + + +def enrich_span_from_response(tracer: trace.Tracer, span: Span, operation_id: str, response: httpx.Response) -> None: + span.set_status(Status(StatusCode.OK)) + response_data = json.loads(response.content) + + # Base attributes + attributes: dict[str, str | int] = { + http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, + MistralAIAttributes.MISTRAL_AI_OPERATION_ID: operation_id, + gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value + } + + # Add usage attributes if available + usage = response_data.get("usage", {}) + if usage: + attributes.update({ + gen_ai_attributes.GEN_AI_USAGE_PROMPT_TOKENS: usage.get("prompt_tokens", 0), + gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS: usage.get("completion_tokens", 0), + MistralAIAttributes.MISTRAL_AI_TOTAL_TOKENS: usage.get("total_tokens", 0) + }) + + span.set_attributes(attributes) + if operation_id == "agents_api_v1_agents_create": + # Semantics from 
https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-agent-spans/#create-agent-span + agent_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.CREATE_AGENT.value, + gen_ai_attributes.GEN_AI_AGENT_DESCRIPTION: response_data.get("description", ""), + gen_ai_attributes.GEN_AI_AGENT_ID: response_data.get("id", ""), + gen_ai_attributes.GEN_AI_AGENT_NAME: response_data.get("name", ""), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: response_data.get("model", ""), + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: response_data.get("instructions", "") + } + span.set_attributes(agent_attributes) + if operation_id in ["agents_api_v1_conversations_start", "agents_api_v1_conversations_append"]: + outputs = response_data.get("outputs", []) + conversation_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.INVOKE_AGENT.value, + gen_ai_attributes.GEN_AI_CONVERSATION_ID: response_data.get("conversation_id", "") + } + span.set_attributes(conversation_attributes) + parent_context = set_span_in_context(span) + + for output in outputs: + # TODO: Only enrich the spans if it's a single turn conversation. 
+ # Multi turn conversations are handled in the extra.run.tools.create_function_result function + if output["type"] == "function.call": + pass + if output["type"] == "tool.execution": + start_ns = parse_time_to_nanos(output["created_at"]) + end_ns = parse_time_to_nanos(output["completed_at"]) + child_span = tracer.start_span("Tool Execution", start_time=start_ns, context=parent_context) + child_span.set_attributes({"agent.trace.public": ""}) + tool_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value, + gen_ai_attributes.GEN_AI_TOOL_CALL_ID: output.get("id", ""), + MistralAIAttributes.MISTRAL_AI_TOOL_CALL_ARGUMENTS: output.get("arguments", ""), + gen_ai_attributes.GEN_AI_TOOL_NAME: output.get("name", "") + } + child_span.set_attributes(tool_attributes) + child_span.end(end_time=end_ns) + if output["type"] == "message.output": + start_ns = parse_time_to_nanos(output["created_at"]) + end_ns = parse_time_to_nanos(output["completed_at"]) + child_span = tracer.start_span("Message Output", start_time=start_ns, context=parent_context) + child_span.set_attributes({"agent.trace.public": ""}) + message_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.CHAT.value, + gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value, + MistralAIAttributes.MISTRAL_AI_MESSAGE_ID: output.get("id", ""), + gen_ai_attributes.GEN_AI_AGENT_ID: output.get("agent_id", ""), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: output.get("model", "") + } + child_span.set_attributes(message_attributes) + child_span.end(end_time=end_ns) + if operation_id == "ocr_v1_ocr_post": + usage_info = response_data.get("usage_info", "") + ocr_attributes = { + MistralAIAttributes.MISTRAL_AI_OPERATION_NAME: MistralAINameValues.OCR.value, + MistralAIAttributes.MISTRAL_AI_OCR_USAGE_PAGES_PROCESSED: usage_info.get("pages_processed", "") if usage_info else "", + 
MistralAIAttributes.MISTRAL_AI_OCR_USAGE_DOC_SIZE_BYTES: usage_info.get("doc_size_bytes", "") if usage_info else "", + gen_ai_attributes.GEN_AI_REQUEST_MODEL: response_data.get("model", "") + } + span.set_attributes(ocr_attributes) + + +class GenAISpanProcessor(SpanProcessor): + def on_start(self, span, parent_context = None): + span.set_attributes({"agent.trace.public": ""}) + + +def get_or_create_otel_tracer() -> tuple[bool, Tracer]: + """ + Get a tracer from the current TracerProvider. + + The SDK does not set up its own TracerProvider - it relies on the application + to configure OpenTelemetry. This follows OTEL best practices where: + - Libraries/SDKs get tracers from the global provider + - Applications configure the TracerProvider + + If no TracerProvider is configured, the ProxyTracerProvider (default) will + return a NoOp tracer, effectively disabling tracing. Once the application + sets up a real TracerProvider, subsequent spans will be recorded. + + Returns: + Tuple[bool, Tracer]: (tracing_enabled, tracer) + - tracing_enabled is True if a real TracerProvider is configured + - tracer is always valid (may be NoOp if no provider configured) + """ + tracer_provider = trace.get_tracer_provider() + tracer = tracer_provider.get_tracer(MISTRAL_SDK_OTEL_TRACER_NAME) + + # Tracing is considered enabled if we have a real TracerProvider (not the default proxy) + tracing_enabled = not isinstance(tracer_provider, trace.ProxyTracerProvider) + + return tracing_enabled, tracer + +def get_traced_request_and_span( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + request: httpx.Request, +) -> tuple[httpx.Request, Span | None]: + if not tracing_enabled: + return request, span + + try: + span = tracer.start_span(name=operation_id) + span.set_attributes({"agent.trace.public": ""}) + # Inject the span context into the request headers to be used by the backend service to continue the trace + propagate.inject(request.headers, 
context=set_span_in_context(span)) + span = enrich_span_from_request(span, request) + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_CREATE_SPAN_FOR_REQUEST, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + if span: + end_span(span=span) + span = None + + return request, span + + +def get_traced_response( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + response: httpx.Response, +) -> httpx.Response: + if not tracing_enabled or not span: + return response + try: + is_stream_response = not response.is_closed and not response.is_stream_consumed + if is_stream_response: + return TracedResponse.from_response(resp=response, span=span) + enrich_span_from_response( + tracer, span, operation_id, response + ) + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_ENRICH_SPAN_WITH_RESPONSE, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + if span: + end_span(span=span) + return response + +def get_response_and_error( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + response: httpx.Response, + error: Exception | None, +) -> tuple[httpx.Response, Exception | None]: + if not tracing_enabled or not span: + return response, error + try: + if error: + span.record_exception(error) + span.set_status(Status(StatusCode.ERROR, str(error))) + if hasattr(response, "_content") and response._content: + response_body = json.loads(response._content) + if response_body.get("object", "") == "error": + if error_msg := response_body.get("message", ""): + attributes = { + http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, + MistralAIAttributes.MISTRAL_AI_ERROR_TYPE: response_body.get("type", ""), + MistralAIAttributes.MISTRAL_AI_ERROR_MESSAGE: error_msg, + MistralAIAttributes.MISTRAL_AI_ERROR_CODE: response_body.get("code", ""), + } + for attribute, value in attributes.items(): + if value: + 
span.set_attribute(attribute, value) + span.end() + span = None + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_HANDLE_ERROR_IN_SPAN, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + + if span: + span.end() + span = None + return response, error + + +def end_span(span: Span) -> None: + try: + span.end() + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_END_SPAN, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + +class TracedResponse(httpx.Response): + """ + TracedResponse is a subclass of httpx.Response that ends the span when the response is closed. + + This hack allows ending the span only once the stream is fully consumed. + """ + def __init__(self, *args, span: Span | None, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.span = span + + def close(self) -> None: + if self.span: + end_span(span=self.span) + super().close() + + async def aclose(self) -> None: + if self.span: + end_span(span=self.span) + await super().aclose() + + @classmethod + def from_response(cls, resp: httpx.Response, span: Span | None) -> "TracedResponse": + traced_resp = cls.__new__(cls) + traced_resp.__dict__ = copy.copy(resp.__dict__) + traced_resp.span = span + + # Warning: this syntax bypasses the __init__ method. 
+ # If you add init logic in the TracedResponse.__init__ method, you will need to add the following line for it to execute: + # traced_resp.__init__(your_arguments) + + return traced_resp diff --git a/src/mistralai/extra/realtime/__init__.py b/src/mistralai/extra/realtime/__init__.py new file mode 100644 index 00000000..7b80e045 --- /dev/null +++ b/src/mistralai/extra/realtime/__init__.py @@ -0,0 +1,25 @@ +from mistralai.client.models import ( + AudioEncoding, + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, +) + +from .connection import UnknownRealtimeEvent, RealtimeConnection +from .transcription import RealtimeTranscription + +__all__ = [ + "AudioEncoding", + "AudioFormat", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", + "RealtimeConnection", + "RealtimeTranscription", + "UnknownRealtimeEvent", +] diff --git a/src/mistralai/extra/realtime/connection.py b/src/mistralai/extra/realtime/connection.py new file mode 100644 index 00000000..6547052b --- /dev/null +++ b/src/mistralai/extra/realtime/connection.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +import base64 +import json +from asyncio import CancelledError +from collections import deque +from typing import Any, AsyncIterator, Deque, Optional, Union + +from pydantic import ValidationError, BaseModel + +try: + from websockets.asyncio.client import ClientConnection # websockets >= 13.0 +except ImportError as exc: + raise ImportError( + "The `websockets` package (>=13.0) is required for real-time transcription. 
" + "Install with: pip install 'mistralai[realtime]'" + ) from exc + +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioFlush, + RealtimeTranscriptionError, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdatePayload, + TranscriptionStreamDone, + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, +) +from mistralai.client.types import UNSET + + +class UnknownRealtimeEvent(BaseModel): + """ + Forward-compat fallback event: + - unknown message type + - invalid JSON payload + - schema validation failure + """ + + type: Optional[str] + content: Any + error: Optional[str] = None + + +RealtimeEvent = Union[ + # session lifecycle + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + # server errors + RealtimeTranscriptionError, + # transcription events + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, + TranscriptionStreamDone, + # forward-compat fallback + UnknownRealtimeEvent, +] + +_MESSAGE_MODELS: dict[str, Any] = { + "session.created": RealtimeTranscriptionSessionCreated, + "session.updated": RealtimeTranscriptionSessionUpdated, + "error": RealtimeTranscriptionError, + "transcription.language": TranscriptionStreamLanguage, + "transcription.segment": TranscriptionStreamSegmentDelta, + "transcription.text.delta": TranscriptionStreamTextDelta, + "transcription.done": TranscriptionStreamDone, +} + + +def parse_realtime_event(payload: Any) -> RealtimeEvent: + """ + Tolerant parser: + - unknown event type -> UnknownRealtimeEvent + - validation failures -> UnknownRealtimeEvent (includes error string) + - invalid payload -> UnknownRealtimeEvent + """ + if not isinstance(payload, dict): + return 
UnknownRealtimeEvent( + type=None, content=payload, error="expected JSON object" + ) + + msg_type = payload.get("type") + if not isinstance(msg_type, str): + return UnknownRealtimeEvent( + type=None, content=payload, error="missing/invalid 'type'" + ) + + model_cls = _MESSAGE_MODELS.get(msg_type) + if model_cls is None: + return UnknownRealtimeEvent( + type=msg_type, content=payload, error="unknown event type" + ) + try: + parsed = model_cls.model_validate(payload) + return parsed + except ValidationError as exc: + return UnknownRealtimeEvent(type=msg_type, content=payload, error=str(exc)) + + +class RealtimeConnection: + def __init__( + self, + websocket: ClientConnection, + session: RealtimeTranscriptionSession, + *, + initial_events: Optional[list[RealtimeEvent]] = None, + ) -> None: + self._websocket = websocket + self._session = session + self._closed = False + self._initial_events: Deque[RealtimeEvent] = deque(initial_events or []) + + @property + def request_id(self) -> str: + return self._session.request_id + + @property + def session(self) -> RealtimeTranscriptionSession: + return self._session + + @property + def audio_format(self) -> AudioFormat: + return self._session.audio_format + + @property + def is_closed(self) -> bool: + return self._closed + + async def send_audio( + self, audio_bytes: Union[bytes, bytearray, memoryview] + ) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + + message = RealtimeTranscriptionInputAudioAppend( + audio=base64.b64encode(bytes(audio_bytes)).decode("ascii") + ) + await self._websocket.send(message.model_dump_json()) + + async def flush_audio(self) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + await self._websocket.send( + RealtimeTranscriptionInputAudioFlush().model_dump_json() + ) + + async def update_session( + self, + audio_format: Optional[AudioFormat] = None, + *, + target_streaming_delay_ms: Optional[int] = None, + ) -> None: + if self._closed: + raise 
RuntimeError("Connection is closed") + + if audio_format is None and target_streaming_delay_ms is None: + raise ValueError("At least one session field must be provided") + + message = RealtimeTranscriptionSessionUpdateMessage( + session=RealtimeTranscriptionSessionUpdatePayload( + audio_format=audio_format if audio_format is not None else UNSET, + target_streaming_delay_ms=target_streaming_delay_ms + if target_streaming_delay_ms is not None + else UNSET, + ) + ) + await self._websocket.send(message.model_dump_json()) + + async def end_audio(self) -> None: + if self._closed: + return + await self._websocket.send( + RealtimeTranscriptionInputAudioEnd().model_dump_json() + ) + + async def close(self, *, code: int = 1000, reason: str = "") -> None: + if self._closed: + return + self._closed = True + await self._websocket.close(code=code, reason=reason) + + async def __aenter__(self) -> "RealtimeConnection": + return self + + async def __aexit__(self, exc_type, exc, tb) -> None: + await self.close() + + def __aiter__(self) -> AsyncIterator[RealtimeEvent]: + return self.events() + + async def events(self) -> AsyncIterator[RealtimeEvent]: + # replay any handshake/prelude events (including session.created) + while self._initial_events: + ev = self._initial_events.popleft() + self._apply_session_updates(ev) + yield ev + + try: + async for msg in self._websocket: + text = ( + msg.decode("utf-8", errors="replace") + if isinstance(msg, (bytes, bytearray)) + else msg + ) + try: + data = json.loads(text) + except Exception as exc: + yield UnknownRealtimeEvent( + type=None, content=text, error=f"invalid JSON: {exc}" + ) + continue + + ev = parse_realtime_event(data) + self._apply_session_updates(ev) + yield ev + except CancelledError: + pass + finally: + await self.close() + + def _apply_session_updates(self, ev: RealtimeEvent) -> None: + if isinstance(ev, RealtimeTranscriptionSessionCreated) or isinstance( + ev, RealtimeTranscriptionSessionUpdated + ): + self._session = 
ev.session diff --git a/src/mistralai/extra/realtime/transcription.py b/src/mistralai/extra/realtime/transcription.py new file mode 100644 index 00000000..b216e676 --- /dev/null +++ b/src/mistralai/extra/realtime/transcription.py @@ -0,0 +1,278 @@ +from __future__ import annotations + +import asyncio +import json +import time +from typing import AsyncIterator, Mapping, Optional +from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse + +try: + from websockets.asyncio.client import ( + ClientConnection, + connect, + ) # websockets >= 13.0 +except ImportError as exc: + raise ImportError( + "The `websockets` package (>=13.0) is required for real-time transcription. " + "Install with: pip install 'mistralai[realtime]'" + ) from exc + +from mistralai.client import models, utils +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, +) +from mistralai.client.sdkconfiguration import SDKConfiguration +from mistralai.client.utils import generate_url, get_security, get_security_from_env + +from ..exceptions import RealtimeTranscriptionException, RealtimeTranscriptionWSError +from .connection import ( + RealtimeConnection, + RealtimeEvent, + UnknownRealtimeEvent, + parse_realtime_event, +) + + +class RealtimeTranscription: + """Client for realtime transcription over WebSocket (websockets >= 13.0).""" + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self._sdk_config = sdk_config + + def _build_url( + self, + model: str, + *, + server_url: Optional[str], + query_params: Mapping[str, str], + ) -> str: + if server_url is not None: + base_url = utils.remove_suffix(server_url, "/") + else: + base_url, _ = self._sdk_config.get_server_details() + + url = generate_url(base_url, "/v1/audio/transcriptions/realtime", None) + + parsed = urlparse(url) + merged = dict(parse_qsl(parsed.query, keep_blank_values=True)) + merged["model"] = model + 
merged.update(dict(query_params)) + + return urlunparse(parsed._replace(query=urlencode(merged))) + + async def connect( + self, + model: str, + audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RealtimeConnection: + if timeout_ms is None: + timeout_ms = self._sdk_config.timeout_ms + + security = self._sdk_config.security + if security is not None and callable(security): + security = security() + + resolved_security = get_security_from_env(security, models.Security) + + headers: dict[str, str] = {} + query_params: dict[str, str] = {} + + if resolved_security is not None: + security_headers, security_query = get_security(resolved_security) + headers |= security_headers + for key, values in security_query.items(): + if values: + query_params[key] = values[-1] + + if http_headers is not None: + headers |= dict(http_headers) + + url = self._build_url(model, server_url=server_url, query_params=query_params) + + parsed = urlparse(url) + if parsed.scheme == "https": + parsed = parsed._replace(scheme="wss") + elif parsed.scheme == "http": + parsed = parsed._replace(scheme="ws") + ws_url = urlunparse(parsed) + open_timeout = None if timeout_ms is None else timeout_ms / 1000.0 + user_agent = self._sdk_config.user_agent + + websocket: Optional[ClientConnection] = None + try: + websocket = await connect( + ws_url, + additional_headers=dict(headers), + open_timeout=open_timeout, + user_agent_header=user_agent, + ) + + session, initial_events = await _recv_handshake( + websocket, timeout_ms=timeout_ms + ) + connection = RealtimeConnection( + websocket=websocket, + session=session, + initial_events=initial_events, + ) + + if audio_format is not None or target_streaming_delay_ms is not None: + await connection.update_session( + audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, + ) + + 
return connection + + except RealtimeTranscriptionException: + if websocket is not None: + await websocket.close() + raise + except Exception as exc: + if websocket is not None: + await websocket.close() + raise RealtimeTranscriptionException(f"Failed to connect: {exc}") from exc + + async def transcribe_stream( + self, + audio_stream: AsyncIterator[bytes], + model: str, + audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncIterator[RealtimeEvent]: + """ + Flow + - opens connection + - streams audio in background + - yields events from the connection + """ + async with await self.connect( + model=model, + audio_format=audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, + server_url=server_url, + timeout_ms=timeout_ms, + http_headers=http_headers, + ) as connection: + + async def _send() -> None: + async for chunk in audio_stream: + if connection.is_closed: + break + await connection.send_audio(chunk) + await connection.flush_audio() + await connection.end_audio() + + send_task = asyncio.create_task(_send()) + + try: + async for event in connection: + yield event + + # stop early (caller still sees the terminating event) + if isinstance(event, RealtimeTranscriptionError): + break + if getattr(event, "type", None) == "transcription.done": + break + finally: + send_task.cancel() + try: + await send_task + except asyncio.CancelledError: + pass + await connection.close() + + +def _extract_error_message(payload: dict) -> str: + err = payload.get("error") + if isinstance(err, dict): + msg = err.get("message") + if isinstance(msg, str): + return msg + if isinstance(msg, dict): + detail = msg.get("detail") + if isinstance(detail, str): + return detail + return "Realtime transcription error" + + +async def _recv_handshake( + websocket: ClientConnection, + *, + timeout_ms: 
Optional[int], +) -> tuple[RealtimeTranscriptionSession, list[RealtimeEvent]]: + """ + Read messages until session.created or error. + Replay all messages read during handshake as initial events (lossless). + """ + timeout_s = None if timeout_ms is None else timeout_ms / 1000.0 + deadline = None if timeout_s is None else (time.monotonic() + timeout_s) + + initial_events: list[RealtimeEvent] = [] + + def remaining() -> Optional[float]: + if deadline is None: + return None + return max(0.0, deadline - time.monotonic()) + + try: + while True: + raw = await asyncio.wait_for(websocket.recv(), timeout=remaining()) + text = ( + raw.decode("utf-8", errors="replace") + if isinstance(raw, (bytes, bytearray)) + else raw + ) + + try: + payload = json.loads(text) + except Exception as exc: + initial_events.append( + UnknownRealtimeEvent( + type=None, content=text, error=f"invalid JSON: {exc}" + ) + ) + continue + + msg_type = payload.get("type") if isinstance(payload, dict) else None + if msg_type == "error" and isinstance(payload, dict): + parsed = parse_realtime_event(payload) + initial_events.append(parsed) + if isinstance(parsed, RealtimeTranscriptionError): + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=parsed, + raw=payload, + ) + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=None, + raw=payload, + ) + + event = parse_realtime_event(payload) + initial_events.append(event) + + if isinstance(event, RealtimeTranscriptionSessionCreated): + return event.session, initial_events + + except asyncio.TimeoutError as exc: + raise RealtimeTranscriptionException( + "Timeout waiting for session creation." 
+ ) from exc + except RealtimeTranscriptionException: + raise + except Exception as exc: + raise RealtimeTranscriptionException( + f"Unexpected websocket handshake failure: {exc}" + ) from exc diff --git a/src/mistralai/models/__init__.py b/src/mistralai/extra/run/__init__.py similarity index 100% rename from src/mistralai/models/__init__.py rename to src/mistralai/extra/run/__init__.py diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py new file mode 100644 index 00000000..7ade705f --- /dev/null +++ b/src/mistralai/extra/run/context.py @@ -0,0 +1,287 @@ +import asyncio +import inspect +import typing +from collections.abc import Callable +from contextlib import AsyncExitStack +from dataclasses import dataclass, field +from functools import wraps +from logging import getLogger + +import pydantic + +from mistralai.extra import response_format_from_pydantic_model +from mistralai.extra.exceptions import RunException +from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.run.result import RunResult +from mistralai.extra.run.tools import ( + RunCoroutine, + RunFunction, + RunMCPTool, + RunTool, + create_function_result, + create_tool_call, +) +from mistralai.client.models import ( + CompletionArgs, + CompletionArgsTypedDict, + ConversationInputs, + ConversationInputsTypedDict, + ConversationRequestTool, + ConversationRequestToolTypedDict, + FunctionCallEntry, + FunctionResultEntry, + FunctionTool, + InputEntries, + MessageInputEntry, + ResponseFormat, + UnknownAgentTool, + UpdateAgentRequestTool, +) +from mistralai.client.types.basemodel import BaseModel, OptionalNullable, UNSET + +if typing.TYPE_CHECKING: + from mistralai.client import Beta, OptionalNullable + +logger = getLogger(__name__) + + +class AgentRequestKwargs(typing.TypedDict): + agent_id: str + + +class ModelRequestKwargs(typing.TypedDict): + model: str + instructions: OptionalNullable[str] + tools: OptionalNullable[list[ConversationRequestTool] | 
list[ConversationRequestToolTypedDict]] + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] + + +@dataclass +class RunContext: + """A context for running a conversation with an agent or a model. + + The context can be used to execute function calls, connect to MCP server, and keep track of information about + the run. + + Args: + conversation_id (Options[str]): The unique identifier for the conversation. This is + passed if the user wants to continue an existing conversation. + model (Options[str]): The model name to be used for the conversation. Can't be used along with 'agent_id'. + agent_id (Options[str]): The agent id to be used for the conversation. Can't be used along with 'model'. + output_format (type[BaseModel] | None): The output format expected from the conversation. It represents + the `response_format` which is part of the `CompletionArgs`. + request_count (int): The number of requests made in the current `RunContext`. + continue_on_fn_error (bool): Flag to determine if the conversation should continue when function execution + resulted in an error. 
+ """ + + _exit_stack: AsyncExitStack = field(init=False) + _callable_tools: dict[str, RunTool] = field(init=False, default_factory=dict) + _mcp_clients: list[MCPClientProtocol] = field(init=False, default_factory=list) + + conversation_id: str | None = field(default=None) + model: str | None = field(default=None) + agent_id: str | None = field(default=None) + output_format: type[BaseModel] | None = field(default=None) + request_count: int = field(default=0) + continue_on_fn_error: bool = field(default=False) + + def __post_init__(self): + if self.model and self.agent_id: + raise RunException("Only one for model or agent_id should be set") + self._exit_stack = AsyncExitStack() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self._exit_stack.aclose() + for mcp_client in self._mcp_clients: + await mcp_client.aclose() + + def register_func(self, func: Callable): + """Add a function to the context.""" + if not inspect.isfunction(func): + raise RunException( + "Only object of type function can be registered at the moment." 
+ ) + + if inspect.iscoroutinefunction(func): + self._callable_tools[func.__name__] = RunCoroutine( + name=func.__name__, + awaitable=func, + tool=create_tool_call(func), + ) + else: + self._callable_tools[func.__name__] = RunFunction( + name=func.__name__, + callable=func, + tool=create_tool_call(func), + ) + + @wraps(func) + def wrapper(*args, **kwargs): + logger.info(f"Executing {func.__name__}") + return func(*args, **kwargs) + + return wrapper + + async def register_mcp_clients(self, mcp_clients: list[MCPClientProtocol]) -> None: + """Registering multiple MCP clients at the same time in the same asyncio.Task.""" + for mcp_client in mcp_clients: + await self.register_mcp_client(mcp_client) + + async def register_mcp_client(self, mcp_client: MCPClientProtocol) -> None: + """Add a MCP client to the context.""" + await mcp_client.initialize(exit_stack=self._exit_stack) + tools = await mcp_client.get_tools() + for tool in tools: + logger.info( + f"Adding tool {tool.function.name} from {mcp_client._name or 'mcp client'}" + ) + self._callable_tools[tool.function.name] = RunMCPTool( + name=tool.function.name, + tool=tool, + mcp_client=mcp_client, + ) + self._mcp_clients.append(mcp_client) + + async def execute_function_calls( + self, function_calls: list[FunctionCallEntry] + ) -> list[FunctionResultEntry]: + """Execute function calls and create function results from them.""" + if not all( + function_call.name in self._callable_tools + for function_call in function_calls + ): + logger.warning("Can't execute all functions, stopping run here") + return [] + function_result_tasks = [] + for function_call in function_calls: + function_result_tasks.append( + asyncio.create_task( + create_function_result( + function_call=function_call, + run_tool=self._callable_tools[function_call.name], + continue_on_fn_error=self.continue_on_fn_error, + ) + ) + ) + await asyncio.gather(*function_result_tasks) + return [task.result() for task in function_result_tasks] + + def 
get_tools(self) -> list[FunctionTool]: + """Get the tools that are part of the context.""" + callable_tools = [ + run_functions.tool for run_functions in self._callable_tools.values() + ] + return callable_tools + + async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs: + """Prepare an agent request with the functions added to the context. + + Update the agent definition before making the request. + """ + if self.agent_id is None: + raise RunException( + "Can't prepare an agent request, if no agent_id is provided" + ) + agent = await beta_client.agents.get_async(agent_id=self.agent_id) + agent_tools = agent.tools or [] + updated_tools: list[UpdateAgentRequestTool] = [] + for tool in agent_tools: + if isinstance(tool, UnknownAgentTool): + # Skip unknown tools - can't include them in update request + continue + if not isinstance(tool, FunctionTool): + updated_tools.append(tool) + elif tool.function.name in self._callable_tools: + # function already exists in the agent, don't add it again + continue + else: + updated_tools.append(tool) + updated_tools += self.get_tools() + completion_args = ( + CompletionArgs(response_format=self.response_format) + if self.output_format + else None + ) + beta_client.agents.update( + agent_id=self.agent_id, tools=updated_tools, completion_args=completion_args + ) + return AgentRequestKwargs(agent_id=self.agent_id) + + async def prepare_model_request( + self, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, + instructions: OptionalNullable[str] = None, + ) -> ModelRequestKwargs: + if self.model is None: + raise RunException("Can't prepare a model request, if no model is provided") + if not completion_args and self.output_format: + completion_args = CompletionArgs(response_format=self.response_format) + elif isinstance(completion_args, CompletionArgs) and 
self.output_format: + completion_args.response_format = self.response_format + elif isinstance(completion_args, dict) and self.output_format: + completion_args = CompletionArgs.model_validate(completion_args) + completion_args.response_format = self.response_format + request_tools = [] + if isinstance(tools, list): + for tool in tools: + request_tools.append(typing.cast(ConversationRequestTool, tool)) + for tool in self.get_tools(): + request_tools.append(tool) + return ModelRequestKwargs( + model=self.model, + tools=request_tools, + instructions=instructions, + completion_args=completion_args, + ) + + @property + def response_format(self) -> ResponseFormat: + if not self.output_format: + raise RunException("No response format exist for the current RunContext.") + return response_format_from_pydantic_model(self.output_format) + + +async def _validate_run( + *, + beta_client: "Beta", + run_ctx: RunContext, + inputs: ConversationInputs | ConversationInputsTypedDict, + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, +) -> tuple[ + AgentRequestKwargs | ModelRequestKwargs, RunResult, list[InputEntries] +]: + input_entries: list[InputEntries] = [] + if isinstance(inputs, str): + input_entries.append(MessageInputEntry(role="user", content=inputs)) + else: + for input in inputs: + if isinstance(input, dict): + input_entries.append( + pydantic.TypeAdapter(InputEntries).validate_python(input) + ) + run_result = RunResult( + input_entries=input_entries, + output_model=run_ctx.output_format, + conversation_id=run_ctx.conversation_id, + ) + req: AgentRequestKwargs | ModelRequestKwargs + if run_ctx.agent_id: + if tools or completion_args: + raise RunException("Can't set tools or completion_args when using an agent") + req = await run_ctx.prepare_agent_request(beta_client=beta_client) + elif 
run_ctx.model: + req = await run_ctx.prepare_model_request( + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + else: + raise RunException("Either agent_id or model must be set in the run context") + return req, run_result, input_entries diff --git a/src/mistralai/extra/run/result.py b/src/mistralai/extra/run/result.py new file mode 100644 index 00000000..6e2bcc8a --- /dev/null +++ b/src/mistralai/extra/run/result.py @@ -0,0 +1,211 @@ +import datetime +import json +import typing +from dataclasses import dataclass, field +from typing import Annotated, Literal + +from pydantic import BaseModel, Discriminator, Tag + +from mistralai.extra.utils.response_format import pydantic_model_from_json +from mistralai.client.models import ( + FunctionResultEntry, + FunctionCallEntry, + MessageOutputEntry, + AgentHandoffEntry, + ToolExecutionEntry, + MessageInputEntry, + AgentHandoffDoneEvent, + AgentHandoffStartedEvent, + ResponseDoneEvent, + ResponseErrorEvent, + ResponseStartedEvent, + FunctionCallEvent, + MessageOutputEvent, + ToolExecutionDoneEvent, + ToolExecutionStartedEvent, + ConversationEventsData, + MessageOutputEventContent, + MessageOutputEntryContent, + TextChunk, + MessageOutputContentChunks, + SSETypes, + InputEntries, + ToolFileChunk, + ToolReferenceChunk, + FunctionCallEntryArguments, +) +from mistralai.client.utils import get_discriminator + +RunOutputEntries = ( + MessageOutputEntry + | FunctionCallEntry + | FunctionResultEntry + | AgentHandoffEntry + | ToolExecutionEntry +) + +RunEntries = RunOutputEntries | MessageInputEntry + + +def as_text(entry: RunOutputEntries) -> str: + """Keep only the messages and turn content into textual representation.""" + text = "" + if isinstance(entry, MessageOutputEntry): + if isinstance(entry.content, str): + text += entry.content + else: + for chunk in entry.content: + if isinstance(chunk, TextChunk): + text += chunk.text + elif isinstance(chunk, ToolFileChunk): + text += f"" + elif 
isinstance(chunk, ToolReferenceChunk): + text += f"" + return text + + +def reconstitute_message_content( + chunks: list[MessageOutputEventContent], +) -> MessageOutputEntryContent: + """Given a list of MessageOutputEventContent, recreate a normalised MessageOutputEntryContent.""" + if all(isinstance(chunk, str) for chunk in chunks): + return "".join(typing.cast(list[str], chunks)) + content: list[MessageOutputContentChunks] = [] + for chunk in chunks: + if isinstance(chunk, str): + chunk = TextChunk(text=chunk) + if isinstance(chunk, TextChunk): + if len(content) and isinstance(content[-1], TextChunk): + content[-1].text += chunk.text + else: + content.append(chunk) + else: + content.append(chunk) + return content + + +def reconstitute_function_call_args(chunks: list[str]) -> FunctionCallEntryArguments: + """Recreates function call arguments from stream""" + return typing.cast(FunctionCallEntryArguments, "".join(chunks)) + + +def reconstitue_entries( + received_event_tracker: dict[int, list[ConversationEventsData]], +) -> list[RunOutputEntries]: + """Given a list of events, recreate the corresponding entries.""" + run_entries: list[RunOutputEntries] = [] + for idx, events in sorted(received_event_tracker.items(), key=lambda x: x[0]): + first_event = events[0] + if isinstance(first_event, MessageOutputEvent): + message_events = typing.cast(list[MessageOutputEvent], events) + run_entries.append( + MessageOutputEntry( + content=reconstitute_message_content( + chunks=[ + message_event.content for message_event in message_events + ] + ), + created_at=first_event.created_at, + id=first_event.id, + agent_id=first_event.agent_id, + model=first_event.model, + role=first_event.role, + ) + ) + elif isinstance(first_event, FunctionCallEvent): + function_call_events = typing.cast(list[FunctionCallEvent], events) + run_entries.append( + FunctionCallEntry( + name=first_event.name, + arguments=reconstitute_function_call_args( + chunks=[ + function_call_event.arguments + for 
function_call_event in function_call_events + ] + ), + created_at=first_event.created_at, + id=first_event.id, + tool_call_id=first_event.tool_call_id, + ) + ) + return run_entries + + +@dataclass +class RunFiles: + id: str + name: str + content: bytes + + +@dataclass +class RunResult: + input_entries: list[InputEntries] + conversation_id: str | None = field(default=None) + output_entries: list[RunOutputEntries] = field(default_factory=list) + files: dict[str, RunFiles] = field(default_factory=dict) + output_model: type[BaseModel] | None = field(default=None) + + def get_file(self, file_id: str) -> RunFiles | None: + return self.files.get(file_id) + + @property + def entries(self) -> list[RunEntries]: + return [*self.input_entries, *self.output_entries] + + @property + def output_as_text(self) -> str: + if not self.output_entries: + raise ValueError("No output entries were started.") + return "\n".join( + as_text(entry) + for entry in self.output_entries + if entry.type == "message.output" + ) + + @property + def output_as_model(self) -> BaseModel: + if self.output_model is None: + raise ValueError("No output format was not set.") + return pydantic_model_from_json( + json.loads(self.output_as_text), self.output_model + ) + + +class FunctionResultEvent(BaseModel): + id: str | None = None + + type: Literal["function.result"] | None = "function.result" + + result: str + + tool_call_id: str + + created_at: datetime.datetime | None = datetime.datetime.now( + tz=datetime.timezone.utc + ) + + output_index: int | None = 0 + + +RunResultEventsType = SSETypes | Literal["function.result"] + +RunResultEventsData = typing.Annotated[ + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")] + | Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")] + | Annotated[ResponseDoneEvent, Tag("conversation.response.done")] + | Annotated[ResponseErrorEvent, Tag("conversation.response.error")] + | Annotated[ResponseStartedEvent, Tag("conversation.response.started")] + | 
Annotated[FunctionCallEvent, Tag("function.call.delta")] + | Annotated[MessageOutputEvent, Tag("message.output.delta")] + | Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")] + | Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")] + | Annotated[FunctionResultEvent, Tag("function.result")], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class RunResultEvents(BaseModel): + event: RunResultEventsType + + data: RunResultEventsData diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py new file mode 100644 index 00000000..18c1d3dd --- /dev/null +++ b/src/mistralai/extra/run/tools.py @@ -0,0 +1,235 @@ +import inspect +import itertools +import json +import logging +from dataclasses import dataclass +from typing import Any, Callable, ForwardRef, Sequence, cast, get_type_hints + +import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes +from griffe import ( + Docstring, + DocstringSectionKind, + DocstringSectionText, + DocstringParameter, + DocstringSection, +) +from opentelemetry import trace +from pydantic import Field, create_model +from pydantic.fields import FieldInfo + +from mistralai.extra.exceptions import RunException +from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.observability.otel import GenAISpanEnum, MistralAIAttributes, set_available_attributes +from mistralai.extra.run.result import RunOutputEntries +from mistralai.client.models import ( + FunctionResultEntry, + FunctionTool, + Function, + FunctionCallEntry, +) + + +logger = logging.getLogger(__name__) + + +@dataclass +class RunFunction: + name: str + callable: Callable + tool: FunctionTool + + +@dataclass +class RunCoroutine: + name: str + awaitable: Callable + tool: FunctionTool + + +@dataclass +class RunMCPTool: + name: str + tool: FunctionTool + mcp_client: MCPClientProtocol + + +RunTool = RunFunction | RunCoroutine | RunMCPTool + + +def 
# --- src/mistralai/extra/run/ (tool-call helpers) ---

def _get_function_description(docstring_sections: list[DocstringSection]) -> str:
    """Given a list of docstring sections create a description for the function.

    Only plain-text sections contribute; parameter sections are handled
    separately by _get_function_parameters.
    """
    text_sections: list[DocstringSectionText] = []
    for section in docstring_sections:
        if section.kind == DocstringSectionKind.text:
            text_sections.append(cast(DocstringSectionText, section))
    return "\n".join(text.value for text in text_sections)


def _get_function_parameters(
    docstring_sections: list[DocstringSection],
    params_from_sig: list[inspect.Parameter],
    type_hints: dict[str, Any],
):
    """Given a list of docstring sections and type annotations create the most accurate tool parameters"""
    # Docstring "Parameters" / "Other Parameters" sections, flattened.
    params_from_docstrings: list[DocstringParameter] = list(
        itertools.chain.from_iterable(
            section.value
            for section in docstring_sections
            if section.kind
            in (DocstringSectionKind.parameters, DocstringSectionKind.other_parameters)
        )
    )

    # Extract all description and annotation
    param_descriptions = {}
    param_annotations = {}

    for param_doc in params_from_docstrings:
        param_descriptions[param_doc.name] = param_doc.description

    for param in params_from_sig:
        if param.name not in param_descriptions:
            param_descriptions[param.name] = ""
        param_annotations[param.name] = type_hints.get(param.name)

    # resolve all params into Field and create the parameters schema
    fields: dict[str, tuple[type, FieldInfo]] = {}
    for p in params_from_sig:
        default = p.default if p.default is not inspect.Parameter.empty else ...
        annotation = (
            p.annotation if p.annotation is not inspect.Parameter.empty else Any
        )
        # handle forward ref with the help of get_type_hints
        if isinstance(annotation, str):
            annotation = type_hints[p.name]

        if isinstance(default, FieldInfo):
            field_info = default
        else:
            # If the annotation is Annotated[..., Field(...)] extract the Field and annotation
            # Otherwise, just use the annotation as-is
            field_info = None
            # If it's Annotated[..., SomeFieldMarker(...)], find it
            if hasattr(annotation, "__metadata__") and hasattr(annotation, "__args__"):
                # It's Annotated
                # e.g. Annotated[str, Field(...)]
                # Extract the first Field(...) or None if not found
                for meta in annotation.__metadata__:  # type: ignore
                    if isinstance(meta, FieldInfo):
                        field_info = meta
                        break
                # The actual annotation is the first part of Annotated
                annotation = annotation.__args__[0]  # type: ignore

            # handle forward ref with the help of get_type_hints
            if isinstance(annotation, ForwardRef):
                annotation = param_annotations[p.name]

            # no Field
            if field_info is None:
                if default is ...:
                    field_info = Field()
                else:
                    field_info = Field(default=default)

        field_info.description = param_descriptions[p.name]
        fields[p.name] = (cast(type, annotation), field_info)

    # Build a throwaway model only to obtain its JSON schema; strip the
    # auto-generated titles which add noise to the tool definition.
    schema = create_model("_", **fields).model_json_schema()  # type: ignore[call-overload]
    schema.pop("title", None)
    for prop in schema.get("properties", {}).values():
        prop.pop("title", None)
    return schema


def create_tool_call(func: Callable) -> FunctionTool:
    """Parse a function docstring / type annotations to create a FunctionTool."""
    name = func.__name__

    # Inspect and parse the docstring of the function
    doc = inspect.getdoc(func)
    docstring_sections: list[DocstringSection]
    if not doc:
        logger.warning(
            f"Function '{name}' without a docstring is being parsed, add docstring for more accurate result."
        )
        docstring_sections = []
    else:
        docstring = Docstring(doc, parser="google")
        docstring_sections = docstring.parse(warnings=False)
        if len(docstring_sections) == 0:
            logger.warning(
                f"Function '{name}' has no relevant docstring sections, add docstring for more accurate result."
            )

    # Extract the function's signature and type hints
    sig = inspect.signature(func)
    params_from_sig = list(sig.parameters.values())
    type_hints = get_type_hints(func, include_extras=True, localns=None, globalns=None)

    return FunctionTool(
        function=Function(
            name=name,
            description=_get_function_description(docstring_sections),
            parameters=_get_function_parameters(
                docstring_sections=docstring_sections,
                params_from_sig=params_from_sig,
                type_hints=type_hints,
            ),
            strict=True,
        ),
    )


async def create_function_result(
    function_call: FunctionCallEntry,
    run_tool: RunTool,
    continue_on_fn_error: bool = False,
) -> FunctionResultEntry:
    """Run the function with arguments of a FunctionCallEntry.

    Raises:
        RunException: if the runner type is unsupported, or if the tool fails
            and ``continue_on_fn_error`` is False.
    """
    # FIX: an unhandled RunTool subtype used to fall through the if/elif chain
    # below, leaving `res` unbound and raising a confusing NameError at the
    # final return. Fail fast with an explicit error instead; this is a
    # programming error, so it is deliberately not softened by
    # continue_on_fn_error.
    if not isinstance(run_tool, (RunFunction, RunCoroutine, RunMCPTool)):
        raise RunException(
            f"Unsupported tool runner type '{type(run_tool).__name__}' for tool {function_call.name}"
        )

    arguments = (
        json.loads(function_call.arguments)
        if isinstance(function_call.arguments, str)
        else function_call.arguments
    )
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span(GenAISpanEnum.function_call(function_call.name)) as span:
        try:
            if isinstance(run_tool, RunFunction):
                res = run_tool.callable(**arguments)
            elif isinstance(run_tool, RunCoroutine):
                res = await run_tool.awaitable(**arguments)
            elif isinstance(run_tool, RunMCPTool):
                res = await run_tool.mcp_client.execute_tool(function_call.name, arguments)
            # Span attributes are only recorded on success; failures surface
            # through the exception path below.
            function_call_attributes = {
                gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value,
                gen_ai_attributes.GEN_AI_TOOL_CALL_ID: function_call.id,
                MistralAIAttributes.MISTRAL_AI_TOOL_CALL_ARGUMENTS: str(function_call.arguments),
                gen_ai_attributes.GEN_AI_TOOL_NAME: function_call.name
            }
            set_available_attributes(span, function_call_attributes)

        except Exception as e:
            if continue_on_fn_error is True:
                # Best-effort mode: report the failure back to the model as the
                # tool result instead of aborting the run.
                return FunctionResultEntry(
                    tool_call_id=function_call.tool_call_id,
                    result=f"Error while executing {function_call.name}: {str(e)}",
                )
            raise RunException(
                f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'"
            ) from e

    # NOTE(review): the span uses function_call.id while the result uses
    # function_call.tool_call_id — confirm both fields are intended here.
    return FunctionResultEntry(
        tool_call_id=function_call.tool_call_id,
        result=res if isinstance(res, str) else json.dumps(res),
    )


def get_function_calls(
    output_entries: Sequence[RunOutputEntries],
) -> list[FunctionCallEntry]:
    """Extract all FunctionCallEntry from a conversation response"""
    function_calls = []
    for entry in output_entries:
        if isinstance(entry, FunctionCallEntry):
            function_calls.append(entry)
    return function_calls


# --- src/mistralai/extra/run/utils.py ---
import importlib.util
import sys
from typing import Callable, TypeVar, Any, cast
from functools import wraps

from mistralai.extra.exceptions import MistralClientException

F = TypeVar("F", bound=Callable[..., Any])


REQUIRED_PYTHON_VERSION = (3, 10)
REQUIRED_PYTHON_VERSION_STR = "3.10"
REQUIRED_PACKAGES = ["mcp"]


def is_module_installed(module_name: str) -> bool:
    """Return True if *module_name* is importable in the current environment."""
    spec = importlib.util.find_spec(module_name)
    return spec is not None


def run_requirements(func: F) -> F:
    """Decorator enforcing the Python version and optional-dependency
    requirements of the `run` feature before calling *func*.

    Raises:
        MistralClientException: if Python is older than 3.10 or a required
            package (``mcp``) is not installed.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if sys.version_info < REQUIRED_PYTHON_VERSION:
            # FIX: the two f-strings previously concatenated without a space
            # ("...3.10.You are using...") and the wording said "higher than
            # 3.10" although the check accepts 3.10 itself.
            raise MistralClientException(
                f"{func.__name__} requires Python {REQUIRED_PYTHON_VERSION_STR} or higher. "
                f"You are using Python {sys.version_info.major}.{sys.version_info.minor}."
            )
        for package in REQUIRED_PACKAGES:
            if not is_module_installed(package):
                raise MistralClientException(
                    f"{func.__name__} requires the sdk to be installed with 'agents' extra dependencies."
                )
        return func(*args, **kwargs)

    return cast(F, wrapper)


# --- src/mistralai/extra/struct_chat.py ---
import json
from typing import Generic

from mistralai.client.models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse
from .utils.response_format import CustomPydanticModel, pydantic_model_from_json


class ParsedAssistantMessage(AssistantMessage, Generic[CustomPydanticModel]):
    """Assistant message enriched with the structured-output payload parsed
    from its JSON ``content`` (None when no content was returned)."""
    parsed: CustomPydanticModel | None


class ParsedChatCompletionChoice(ChatCompletionChoice, Generic[CustomPydanticModel]):
    """Choice whose message carries the parsed structured output."""
    message: ParsedAssistantMessage[CustomPydanticModel] | None  # type: ignore


class ParsedChatCompletionResponse(ChatCompletionResponse, Generic[CustomPydanticModel]):
    """Chat completion response whose choices carry parsed structured output."""
    choices: list[ParsedChatCompletionChoice[CustomPydanticModel]] | None  # type: ignore


def convert_to_parsed_chat_completion_response(
    response: ChatCompletionResponse,
    response_format: type[CustomPydanticModel],
) -> ParsedChatCompletionResponse[CustomPydanticModel]:
    """Parse each choice's JSON string content into *response_format* and wrap
    the response in the Parsed* model hierarchy.

    Raises:
        TypeError: when a message content is neither a str nor None.
    """
    parsed_choices = []

    if response.choices:
        for choice in response.choices:
            if choice.message:
                parsed_message: ParsedAssistantMessage = ParsedAssistantMessage(
                    **choice.message.model_dump(),
                    parsed=None
                )
                if isinstance(parsed_message.content, str):
                    # Structured-output responses carry the JSON document as a
                    # plain string; decode then validate against the model.
                    parsed_message.parsed = pydantic_model_from_json(json.loads(parsed_message.content), response_format)
                elif parsed_message.content is None:
                    parsed_message.parsed = None
                else:
                    raise TypeError(f"Unexpected type for message.content: {type(parsed_message.content)}")
                choice_dict = choice.model_dump()
                choice_dict["message"] = parsed_message
                parsed_choice: ParsedChatCompletionChoice = ParsedChatCompletionChoice(**choice_dict)
                parsed_choices.append(parsed_choice)
            else:
                parsed_choice = ParsedChatCompletionChoice(**choice.model_dump())
                parsed_choices.append(parsed_choice)
    response_dict = response.model_dump()
    response_dict["choices"] = parsed_choices
    return ParsedChatCompletionResponse(**response_dict)
# --- src/mistralai/extra/tests/test_struct_chat.py ---
import unittest
from ..struct_chat import (
    convert_to_parsed_chat_completion_response,
    ParsedChatCompletionResponse,
    ParsedChatCompletionChoice,
    ParsedAssistantMessage,
)
from mistralai.client.models import (
    ChatCompletionResponse,
    UsageInfo,
    ChatCompletionChoice,
    AssistantMessage,
)
from pydantic import BaseModel


# Target models for the structured output used in the fixtures below.
class Explanation(BaseModel):
    explanation: str
    output: str


class MathDemonstration(BaseModel):
    steps: list[Explanation]
    final_answer: str


# Raw API response whose message content is a JSON document matching
# MathDemonstration.
mock_cc_response = ChatCompletionResponse(
    id="c0271b2098954c6094231703875ca0bc",
    object="chat.completion",
    model="mistral-large-latest",
    usage=UsageInfo(prompt_tokens=75, completion_tokens=220, total_tokens=295),
    created=1737727558,
    choices=[
        ChatCompletionChoice(
            index=0,
            message=AssistantMessage(
                content='{\n "final_answer": "x = -4",\n "steps": [\n {\n "explanation": "Start with the given equation.",\n "output": "8x + 7 = -23"\n },\n {\n "explanation": "Subtract 7 from both sides to isolate the term with x.",\n "output": "8x = -23 - 7"\n },\n {\n "explanation": "Simplify the right side of the equation.",\n "output": "8x = -30"\n },\n {\n "explanation": "Divide both sides by 8 to solve for x.",\n "output": "x = -30 / 8"\n },\n {\n "explanation": "Simplify the fraction to get the final answer.",\n "output": "x = -4"\n }\n ]\n}',
                tool_calls=None,
                prefix=False,
                role="assistant",
            ),
            finish_reason="stop",
        )
    ],
)


# Same payload as mock_cc_response, with `parsed` populated from the JSON
# content — this is what the converter is expected to produce.
expected_response: ParsedChatCompletionResponse = ParsedChatCompletionResponse(
    choices=[
        ParsedChatCompletionChoice(
            index=0,
            message=ParsedAssistantMessage(
                content='{\n "final_answer": "x = -4",\n "steps": [\n {\n "explanation": "Start with the given equation.",\n "output": "8x + 7 = -23"\n },\n {\n "explanation": "Subtract 7 from both sides to isolate the term with x.",\n "output": "8x = -23 - 7"\n },\n {\n "explanation": "Simplify the right side of the equation.",\n "output": "8x = -30"\n },\n {\n "explanation": "Divide both sides by 8 to solve for x.",\n "output": "x = -30 / 8"\n },\n {\n "explanation": "Simplify the fraction to get the final answer.",\n "output": "x = -4"\n }\n ]\n}',
                tool_calls=None,
                prefix=False,
                role="assistant",
                parsed=MathDemonstration(
                    steps=[
                        Explanation(
                            explanation="Start with the given equation.",
                            output="8x + 7 = -23",
                        ),
                        Explanation(
                            explanation="Subtract 7 from both sides to isolate the term with x.",
                            output="8x = -23 - 7",
                        ),
                        Explanation(
                            explanation="Simplify the right side of the equation.",
                            output="8x = -30",
                        ),
                        Explanation(
                            explanation="Divide both sides by 8 to solve for x.",
                            output="x = -30 / 8",
                        ),
                        Explanation(
                            explanation="Simplify the fraction to get the final answer.",
                            output="x = -4",
                        ),
                    ],
                    final_answer="x = -4",
                ),
            ),
            finish_reason="stop",
        )
    ],
    created=1737727558,
    id="c0271b2098954c6094231703875ca0bc",
    model="mistral-large-latest",
    object="chat.completion",
    usage=UsageInfo(prompt_tokens=75, completion_tokens=220, total_tokens=295),
)


class TestConvertToParsedChatCompletionResponse(unittest.TestCase):
    """End-to-end check of convert_to_parsed_chat_completion_response."""

    def test_convert_to_parsed_chat_completion_response(self):
        output = convert_to_parsed_chat_completion_response(
            mock_cc_response, MathDemonstration
        )
        self.assertEqual(output, expected_response)


if __name__ == "__main__":
    unittest.main()
# --- src/mistralai/extra/tests/test_utils.py ---
import copy
import unittest

from ..utils.response_format import (
    pydantic_model_from_json,
    response_format_from_pydantic_model,
    rec_strict_json_schema,
)
from pydantic import BaseModel, ValidationError

from mistralai.client.models import ResponseFormat, JSONSchema
from mistralai.client.types.basemodel import Unset


class Student(BaseModel):
    name: str
    age: int


class Explanation(BaseModel):
    explanation: str
    output: str


class MathDemonstration(BaseModel):
    steps: list[Explanation]
    final_answer: str


# Plain (non-strict) JSON schema as pydantic emits it for MathDemonstration.
mathdemo_schema = {
    "$defs": {
        "Explanation": {
            "properties": {
                "explanation": {"title": "Explanation", "type": "string"},
                "output": {"title": "Output", "type": "string"},
            },
            "required": ["explanation", "output"],
            "title": "Explanation",
            "type": "object",
        }
    },
    "properties": {
        "steps": {
            "items": {"$ref": "#/$defs/Explanation"},
            "title": "Steps",
            "type": "array",
        },
        "final_answer": {"title": "Final Answer", "type": "string"},
    },
    "required": ["steps", "final_answer"],
    "title": "MathDemonstration",
    "type": "object",
}

# FIX: a shallow .copy() was used here, so mutating "$defs" below also mutated
# mathdemo_schema itself — the strictness test then passed via aliasing rather
# than via rec_strict_json_schema. Use a deep copy so the two fixtures are
# genuinely independent.
mathdemo_strict_schema = copy.deepcopy(mathdemo_schema)
mathdemo_strict_schema["$defs"]["Explanation"]["additionalProperties"] = False  # type: ignore
mathdemo_strict_schema["additionalProperties"] = False

mathdemo_response_format = ResponseFormat(
    type="json_schema",
    json_schema=JSONSchema(
        name="MathDemonstration",
        schema_definition=mathdemo_strict_schema,
        description=Unset(),
        strict=True,
    ),
)


class TestResponseFormat(unittest.TestCase):
    def test_pydantic_model_from_json(self):
        missing_json_data = {"name": "Jean Dupont"}
        good_json_data = {"name": "Jean Dupont", "age": 25}
        extra_json_data = {
            "name": "Jean Dupont",
            "age": 25,
            "extra_field": "extra_value",
        }
        complex_json_data = {
            "final_answer": "x = -4",
            "steps": [
                {
                    "explanation": "Start with the given equation.",
                    "output": "8x + 7 = -23",
                },
                {
                    "explanation": "Subtract 7 from both sides to isolate the term with x.",
                    "output": "8x = -23 - 7",
                },
                {
                    "explanation": "Simplify the right side of the equation.",
                    "output": "8x = -30",
                },
                {
                    "explanation": "Divide both sides by 8 to solve for x.",
                    "output": "x = -30 / 8",
                },
                {
                    "explanation": "Simplify the fraction to get the final answer.",
                    "output": "x = -4",
                },
            ],
        }

        self.assertEqual(
            pydantic_model_from_json(good_json_data, Student),
            Student(name="Jean Dupont", age=25),
        )
        # Extra keys are ignored by pydantic's default validation.
        self.assertEqual(
            pydantic_model_from_json(extra_json_data, Student),
            Student(name="Jean Dupont", age=25),
        )
        self.assertEqual(
            pydantic_model_from_json(complex_json_data, MathDemonstration),
            MathDemonstration(
                steps=[
                    Explanation(
                        explanation="Start with the given equation.",
                        output="8x + 7 = -23",
                    ),
                    Explanation(
                        explanation="Subtract 7 from both sides to isolate the term with x.",
                        output="8x = -23 - 7",
                    ),
                    Explanation(
                        explanation="Simplify the right side of the equation.",
                        output="8x = -30",
                    ),
                    Explanation(
                        explanation="Divide both sides by 8 to solve for x.",
                        output="x = -30 / 8",
                    ),
                    Explanation(
                        explanation="Simplify the fraction to get the final answer.",
                        output="x = -4",
                    ),
                ],
                final_answer="x = -4",
            ),
        )

        # Check it raises a validation error
        with self.assertRaises(ValidationError):
            pydantic_model_from_json(missing_json_data, Student)  # type: ignore

    def test_response_format_from_pydantic_model(self):
        self.assertEqual(
            response_format_from_pydantic_model(MathDemonstration),
            mathdemo_response_format,
        )

    def test_rec_strict_json_schema(self):
        # A set is not a JSON type, so the recursion must reject it.
        invalid_schema = mathdemo_schema | {"wrong_value": {1, 2}}
        # Operate on a deep copy: rec_strict_json_schema mutates its input and
        # must not corrupt the shared module fixture for other tests.
        self.assertEqual(
            rec_strict_json_schema(copy.deepcopy(mathdemo_schema)),
            mathdemo_strict_schema,
        )
        # Numeric leaves ("minimum", "maxLength", ...) are valid JSON Schema
        # and must pass through untouched.
        self.assertEqual(
            rec_strict_json_schema({"type": "string", "maxLength": 5}),
            {"type": "string", "maxLength": 5},
        )

        with self.assertRaises(ValueError):
            rec_strict_json_schema(invalid_schema)


if __name__ == "__main__":
    unittest.main()


# --- src/mistralai/extra/utils/__init__.py ---
from .response_format import response_format_from_pydantic_model

__all__ = ["response_format_from_pydantic_model"]


# --- src/mistralai/extra/utils/_pydantic_helper.py ---
from typing import Any


def rec_strict_json_schema(schema_node: Any) -> Any:
    """
    Recursively set the additionalProperties property to False for all objects in the JSON Schema.
    This makes the JSON Schema strict (i.e. no additional properties are allowed).

    Mutates *schema_node* in place and returns it.

    Raises:
        ValueError: for values that cannot appear in a JSON document.
    """
    # FIX: numbers are valid JSON Schema leaves ("minimum", "maxLength",
    # "multipleOf", ...). The previous (str, bool) check made any model with a
    # numeric constraint raise ValueError here.
    if isinstance(schema_node, (str, bool, int, float)) or schema_node is None:
        return schema_node
    if isinstance(schema_node, dict):
        if "type" in schema_node and schema_node["type"] == "object":
            schema_node["additionalProperties"] = False
        for key, value in schema_node.items():
            schema_node[key] = rec_strict_json_schema(value)
    elif isinstance(schema_node, list):
        for i, value in enumerate(schema_node):
            schema_node[i] = rec_strict_json_schema(value)
    else:
        raise ValueError(f"Unexpected type: {schema_node}")
    return schema_node
# --- src/mistralai/extra/utils/response_format.py ---
from typing import Any, TypeVar

from pydantic import BaseModel
from mistralai.client.models import JSONSchema, ResponseFormat
from ._pydantic_helper import rec_strict_json_schema

# Any user-supplied pydantic model used as a structured-output target.
CustomPydanticModel = TypeVar("CustomPydanticModel", bound=BaseModel)


def response_format_from_pydantic_model(
    model: type[CustomPydanticModel],
) -> ResponseFormat:
    """Generate a strict JSON schema from a pydantic model."""
    model_schema = rec_strict_json_schema(model.model_json_schema())
    # model_validate with the wire-level "schema" key rather than the
    # constructor, which exposes the field as schema_definition.
    json_schema = JSONSchema.model_validate(
        {"name": model.__name__, "schema": model_schema, "strict": True}
    )
    return ResponseFormat(type="json_schema", json_schema=json_schema)


def pydantic_model_from_json(
    json_data: dict[str, Any],
    pydantic_model: type[CustomPydanticModel],
) -> CustomPydanticModel:
    """Validate decoded JSON *data* against *pydantic_model* and return the
    resulting instance.

    Raises:
        pydantic.ValidationError: if *json_data* does not match the model.
    """
    # (Previous docstring said "Parse a JSON schema" — this parses JSON data,
    # not a schema.)
    return pydantic_model.model_validate(json_data)
# --- tasks.py ---
import re
from invoke.context import Context
from invoke.tasks import task
from utils.speakeasy import (
    pin_speakeasy_version,
    OpenAPISpecsPinned,
    SpeakeasyTargets,
    WORKFLOW_PATH,
    WORKFLOW_LOCK_PATH,
)


@task(iterable=["targets"])
def update_speakeasy(
    ctx: Context,
    version: str,
    # FIX: mutable default argument ([SpeakeasyTargets.ALL]) replaced with a
    # None sentinel; the effective default is unchanged.
    targets: "list[SpeakeasyTargets] | None" = None,
    workflow_path: str = WORKFLOW_PATH,
    workflow_lock_path: str = WORKFLOW_LOCK_PATH,
    verbose: bool = False,
):
    """
    Update the speakeasy version and pin the openapi specs to the current revision.

    Usage:
        inv update-speakeasy --version "1.580.2" --targets "all"
        inv update-speakeasy --version "1.580.2" --targets "mistralai-azure-sdk" --targets "mistralai-gcp-sdk" --verbose
        inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml"
        inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" --verbose
    """
    if not targets:
        targets = [SpeakeasyTargets.ALL]
    if not re.match(r'^\d+\.\d+\.\d+$', version):
        raise ValueError(f"Invalid version format: {version}. Expected format: X.Y.Z (e.g., 1.2.3)")
    for target in targets:
        try:
            SpeakeasyTargets(target)
        except ValueError:
            raise ValueError(
                f"Invalid target: {target}. Your targets must be one of {SpeakeasyTargets.list()}"
            )
    cmd = (
        "speakeasy run"
        + " --skip-versioning"
        + "".join(f" -t {target}" for target in targets)
        + (" --verbose" if verbose else "")
    )
    pin_speakeasy_version(workflow_path=workflow_path, version=version)
    # Pin the OpenAPI specs for the duration of the generation run.
    with OpenAPISpecsPinned(workflow_path, workflow_lock_path):
        ctx.run(cmd)
# --- tests/test_azure_integration.py ---
"""
Integration tests for Azure SDK.

These tests require credentials and make real API calls.
Skip if AZURE_API_KEY env var is not set.

Prerequisites:
    1. Azure API key (stored in Bitwarden at "[MaaS] - Azure Foundry API key")
    2. Tailscale connected via gw-0 exit node

Usage:
    AZURE_API_KEY=xxx pytest tests/test_azure_integration.py -v

Environment variables:
    AZURE_API_KEY: API key (required)
    AZURE_ENDPOINT: Base URL (default: https://maas-qa-aifoundry.services.ai.azure.com/models)
    AZURE_MODEL: Model name (default: maas-qa-ministral-3b)
    AZURE_API_VERSION: API version (default: 2024-05-01-preview)

Note: AZURE_ENDPOINT should be the base URL without path suffixes.
The SDK appends /chat/completions to this URL. The api_version parameter
is automatically injected as a query parameter by the SDK.

Available models:
    Chat: maas-qa-ministral-3b, maas-qa-mistral-large-3, maas-qa-mistral-medium-2505
    OCR: maas-qa-mistral-document-ai-2505, maas-qa-mistral-document-ai-2512
    (OCR uses a separate endpoint, not tested here)
"""
import json
import os

import pytest

# Configuration from env vars
AZURE_API_KEY = os.environ.get("AZURE_API_KEY")
AZURE_ENDPOINT = os.environ.get(
    "AZURE_ENDPOINT",
    "https://maas-qa-aifoundry.services.ai.azure.com/models",
)
AZURE_MODEL = os.environ.get("AZURE_MODEL", "maas-qa-ministral-3b")
AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview")

SKIP_REASON = "AZURE_API_KEY env var required"

# Every test in this module is skipped when no credentials are available.
pytestmark = pytest.mark.skipif(
    not AZURE_API_KEY,
    reason=SKIP_REASON
)

# Shared tool definition for tool-call tests
WEATHER_TOOL = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the weather in a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}


@pytest.fixture
def azure_client():
    """Create an Azure client with api_version parameter."""
    # Imported lazily so collection works without the azure extra installed.
    from mistralai.azure.client import MistralAzure
    assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set"
    return MistralAzure(
        api_key=AZURE_API_KEY,
        server_url=AZURE_ENDPOINT,
        api_version=AZURE_API_VERSION,
    )


class TestAzureChatComplete:
    """Test synchronous chat completion."""

    def test_basic_completion(self, azure_client):
        """Test basic chat completion returns a response."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'hello' and nothing else."}
            ],
        )
        assert res is not None
        assert res.choices is not None
        assert len(res.choices) > 0
        assert res.choices[0].message is not None
        assert res.choices[0].message.content is not None
        assert len(res.choices[0].message.content) > 0

    def test_completion_with_system_message(self, azure_client):
        """Test chat completion with system + user message."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "system", "content": "You are a pirate. Respond in pirate speak."},
                {"role": "user", "content": "Say hello."},
            ],
        )
        assert res is not None
        assert res.choices[0].message.content is not None
        assert len(res.choices[0].message.content) > 0

    def test_completion_with_max_tokens(self, azure_client):
        """Test chat completion respects max_tokens."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Count from 1 to 100."}
            ],
            max_tokens=10,
        )
        assert res is not None
        # "stop" is tolerated in case the model stops early on its own.
        assert res.choices[0].finish_reason in ("length", "stop")

    def test_completion_with_temperature(self, azure_client):
        """Test chat completion accepts temperature parameter."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'test'."}
            ],
            temperature=0.0,
        )
        assert res is not None
        assert res.choices[0].message.content is not None

    def test_completion_with_stop_sequence(self, azure_client):
        """Test chat completion stops at stop sequence."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Write three sentences about the sky."}
            ],
            stop=["."],
        )
        assert res is not None
        content = res.choices[0].message.content
        assert content is not None
        # The model should stop at or before the first period
        assert content.count(".") <= 1

    def test_completion_with_random_seed(self, azure_client):
        """Test chat completion with random_seed returns valid responses."""
        res1 = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'deterministic'."}
            ],
            random_seed=42,
        )
        res2 = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'deterministic'."}
            ],
            random_seed=42,
        )
        # Both should return valid responses (not asserting equality due to model non-determinism)
        assert res1.choices[0].message.content is not None
        assert res2.choices[0].message.content is not None

    def test_multi_turn_conversation(self, azure_client):
        """Test multi-turn conversation with user/assistant round-trip."""
        res1 = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "My name is Alice."}
            ],
        )
        assert res1.choices[0].message.content is not None

        res2 = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "My name is Alice."},
                {"role": "assistant", "content": res1.choices[0].message.content},
                {"role": "user", "content": "What is my name?"},
            ],
        )
        assert res2.choices[0].message.content is not None
        assert "Alice" in res2.choices[0].message.content

    def test_tool_call(self, azure_client):
        """Test that the model returns a tool call when given tools."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "What is the weather in Paris?"}
            ],
            tools=[WEATHER_TOOL],
            tool_choice="any",
        )
        assert res is not None
        choice = res.choices[0]
        assert choice.message.tool_calls is not None
        assert len(choice.message.tool_calls) > 0
        tool_call = choice.message.tool_calls[0]
        assert tool_call.function.name == "get_weather"
        args = json.loads(tool_call.function.arguments)
        assert "city" in args

    def test_json_response_format(self, azure_client):
        """Test JSON response format returns valid JSON."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."}
            ],
            response_format={"type": "json_object"},
        )
        assert res is not None
        content = res.choices[0].message.content
        assert content is not None
        parsed = json.loads(content)
        assert isinstance(parsed, dict)

    def test_completion_with_n(self, azure_client):
        """Test completion with n=2 returns multiple choices."""
        res = azure_client.chat.complete(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say a random word."}
            ],
            n=2,
        )
        assert res is not None
        assert len(res.choices) == 2
        for choice in res.choices:
            assert choice.message.content is not None


class TestAzureChatStream:
    """Test streaming chat completion."""

    def test_basic_stream(self, azure_client):
        """Test streaming returns chunks with content."""
        stream = azure_client.chat.stream(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'hello' and nothing else."}
            ],
        )

        chunks = list(stream)
        assert len(chunks) > 0

        # Reassemble the full message from the content deltas.
        content = ""
        for chunk in chunks:
            if chunk.data.choices and chunk.data.choices[0].delta.content:
                content += chunk.data.choices[0].delta.content

        assert len(content) > 0

    def test_stream_with_max_tokens(self, azure_client):
        """Test streaming respects max_tokens truncation."""
        stream = azure_client.chat.stream(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Count from 1 to 100."}
            ],
            max_tokens=10,
        )

        chunks = list(stream)
        assert len(chunks) > 0

        # Find finish_reason in any chunk
        finish_reasons = [
            chunk.data.choices[0].finish_reason
            for chunk in chunks
            if chunk.data.choices and chunk.data.choices[0].finish_reason is not None
        ]
        assert len(finish_reasons) > 0
        assert finish_reasons[-1] in ("length", "stop")

    def test_stream_finish_reason(self, azure_client):
        """Test that the last chunk has a finish_reason."""
        stream = azure_client.chat.stream(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'hi'."}
            ],
        )

        chunks = list(stream)
        assert len(chunks) > 0

        # The final chunk(s) should contain a finish_reason
        finish_reasons = [
            chunk.data.choices[0].finish_reason
            for chunk in chunks
            if chunk.data.choices and chunk.data.choices[0].finish_reason is not None
        ]
        assert len(finish_reasons) > 0
        assert finish_reasons[-1] == "stop"

    def test_stream_tool_call(self, azure_client):
        """Test tool call via streaming, collecting tool_call delta chunks."""
        stream = azure_client.chat.stream(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "What is the weather in Paris?"}
            ],
            tools=[WEATHER_TOOL],
            tool_choice="any",
        )

        chunks = list(stream)
        assert len(chunks) > 0

        # Collect tool call information from delta chunks
        tool_call_found = False
        for chunk in chunks:
            if chunk.data.choices and chunk.data.choices[0].delta.tool_calls:
                tool_call_found = True
                break

        assert tool_call_found, "Expected tool_call delta chunks in stream"


class TestAzureChatCompleteAsync:
    """Test async chat completion."""

    @pytest.mark.asyncio
    async def test_basic_completion_async(self, azure_client):
        """Test async chat completion returns a response."""
        res = await azure_client.chat.complete_async(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'hello' and nothing else."}
            ],
        )
        assert res is not None
        assert res.choices is not None
        assert len(res.choices) > 0
        assert res.choices[0].message.content is not None

    @pytest.mark.asyncio
    async def test_completion_with_system_message_async(self, azure_client):
        """Test async chat completion with system + user message."""
        res = await azure_client.chat.complete_async(
            model=AZURE_MODEL,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Say 'hello'."},
            ],
        )
        assert res is not None
        assert res.choices[0].message.content is not None

    @pytest.mark.asyncio
    async def test_tool_call_async(self, azure_client):
        """Test async tool call returns tool_calls."""
        res = await azure_client.chat.complete_async(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "What is the weather in Paris?"}
            ],
            tools=[WEATHER_TOOL],
            tool_choice="any",
        )
        assert res is not None
        choice = res.choices[0]
        assert choice.message.tool_calls is not None
        assert len(choice.message.tool_calls) > 0
        assert choice.message.tool_calls[0].function.name == "get_weather"


class TestAzureChatStreamAsync:
    """Test async streaming chat completion."""

    @pytest.mark.asyncio
    async def test_basic_stream_async(self, azure_client):
        """Test async streaming returns chunks with content."""
        stream = await azure_client.chat.stream_async(
            model=AZURE_MODEL,
            messages=[
                {"role": "user", "content": "Say 'hello' and nothing else."}
            ],
        )

        content = ""
        async for chunk in stream:
            if chunk.data.choices and chunk.data.choices[0].delta.content:
                content += chunk.data.choices[0].delta.content

        assert len(content) > 0


class TestAzureContextManager:
    """Test context manager support."""

    def test_sync_context_manager(self):
        """Test that MistralAzure works as a sync context manager."""
        from mistralai.azure.client import MistralAzure
        assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set"
        with MistralAzure(
            api_key=AZURE_API_KEY,
            server_url=AZURE_ENDPOINT,
            api_version=AZURE_API_VERSION,
        ) as client:
            res = client.chat.complete(
                model=AZURE_MODEL,
                messages=[
                    {"role": "user", "content": "Say 'context'."}
                ],
            )
            assert res is not None
            assert res.choices[0].message.content is not None

    @pytest.mark.asyncio
    async def test_async_context_manager(self):
        """Test that MistralAzure works as an async context manager."""
        from mistralai.azure.client import MistralAzure
        assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set"
        async with MistralAzure(
            api_key=AZURE_API_KEY,
            server_url=AZURE_ENDPOINT,
            api_version=AZURE_API_VERSION,
        ) as client:
            res = await client.chat.complete_async(
                model=AZURE_MODEL,
                messages=[
                    {"role": "user", "content": "Say 'async context'."}
                ],
            )
            assert res is not None
            assert res.choices[0].message.content is not None
+""" +import inspect + +import pytest + +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.chat import Chat +from mistralai.azure.client.ocr import Ocr +from mistralai.azure.client.types import UNSET + +AZURE_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "ocr": {"process"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("api_key", _EMPTY), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), + ("api_version", "2024-05-01-preview"), +] + +CHAT_COMPLETE_PARAMS = [ + ("messages", _EMPTY), + ("model", "azureai"), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + ("prompt_mode", UNSET), + ("safe_prompt", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + +OCR_PROCESS_PARAMS = [ + ("model", _EMPTY), + ("document", _EMPTY), + ("id", None), + ("pages", UNSET), + ("include_image_base64", UNSET), + ("image_limit", UNSET), + ("image_min_size", UNSET), + 
("bbox_annotation_format", UNSET), + ("document_annotation_format", UNSET), + ("document_annotation_prompt", UNSET), + ("table_format", UNSET), + ("extract_header", None), + ("extract_footer", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestAzureSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralAzure.__annotations__ + + def test_sdk_has_ocr(self): + assert "ocr" in MistralAzure.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralAzure.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralAzure, method), f"MistralAzure missing {method}" + + +class TestAzureChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, "complete_async") + mark_tested("chat", "complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) 
+ assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): + sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"} + async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"} + assert sync_params == 
async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"} + async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def test_complete_model_defaults_azureai(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["model"].default == "azureai" + + def test_stream_model_defaults_azureai(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["model"].default == "azureai" + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["stream"].default is True + + +class TestAzureOcr: + def test_has_process(self): + assert hasattr(Ocr, "process") + mark_tested("ocr", "process") + + def test_has_process_async(self): + assert hasattr(Ocr, "process_async") + mark_tested("ocr", "process_async") + + # -- process params -- + @pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_has_param(self, param_name, expected_default): + sig = inspect.signature(Ocr.process) + assert param_name in sig.parameters, f"Ocr.process missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- process_async matches process -- + @pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Ocr.process_async) + assert param_name in sig.parameters, f"Ocr.process_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process_async param {param_name}: 
expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_process_async_matches_process(self): + sync_params = set(inspect.signature(Ocr.process).parameters) - {"self"} + async_params = set(inspect.signature(Ocr.process_async).parameters) - {"self"} + assert sync_params == async_params + + +class TestAzureCoverage: + def test_all_methods_tested(self): + expected = set() + for resource, methods in AZURE_METHODS.items(): + for method in methods: + expected.add(f"{resource}.{method}") + expected.add(f"{resource}.{method}_async") + untested = expected - TESTED_METHODS + assert not untested, f"Untested methods: {untested}" + + def test_no_unexpected_public_methods_on_chat(self): + public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Chat methods: {unexpected}" + + def test_no_unexpected_public_methods_on_ocr(self): + public = {m for m in dir(Ocr) if not m.startswith("_") and callable(getattr(Ocr, m, None))} + known = {"process", "process_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Ocr methods: {unexpected}" diff --git a/tests/test_gcp_integration.py b/tests/test_gcp_integration.py new file mode 100644 index 00000000..fe24b8b0 --- /dev/null +++ b/tests/test_gcp_integration.py @@ -0,0 +1,512 @@ +""" +Integration tests for GCP SDK. + +These tests require GCP credentials and make real API calls. +Skip if GCP_PROJECT_ID env var is not set. + +Prerequisites: + 1. Authenticate with GCP: gcloud auth application-default login + 2. Have "Vertex AI User" role on the project (e.g. 
See: https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/mistral
+ + The SDK automatically: + - Detects credentials via google.auth.default() + - Auto-refreshes tokens when they expire + - Builds the Vertex AI URL from project_id and region + """ + from mistralai.gcp.client import MistralGCP + return MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) + + +class TestGCPChatComplete: + """Test synchronous chat completion.""" + + def test_basic_completion(self, gcp_client): + """Test basic chat completion returns a response.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_system_message(self, gcp_client): + """Test chat completion with system + user message.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a pirate. 
Respond in pirate speak."}, + {"role": "user", "content": "Say hello."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_max_tokens(self, gcp_client): + """Test chat completion respects max_tokens.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + def test_completion_with_temperature(self, gcp_client): + """Test chat completion accepts temperature parameter.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'test'."} + ], + temperature=0.0, + ) + assert res is not None + assert res.choices[0].message.content is not None + + def test_completion_with_stop_sequence(self, gcp_client): + """Test chat completion stops at stop sequence.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Write three sentences about the sky."} + ], + stop=["."], + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + # The model should stop at or before the first period + assert content.count(".") <= 1 + + def test_completion_with_random_seed(self, gcp_client): + """Test chat completion with random_seed returns valid responses.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + # Both should return valid responses (not asserting equality due to model non-determinism) + assert res1.choices[0].message.content is not None + assert res2.choices[0].message.content is not None + + def 
test_multi_turn_conversation(self, gcp_client): + """Test multi-turn conversation with user/assistant round-trip.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."} + ], + ) + assert res1.choices[0].message.content is not None + + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."}, + {"role": "assistant", "content": res1.choices[0].message.content}, + {"role": "user", "content": "What is my name?"}, + ], + ) + assert res2.choices[0].message.content is not None + assert "Alice" in res2.choices[0].message.content + + def test_tool_call(self, gcp_client): + """Test that the model returns a tool call when given tools.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + tool_call = choice.message.tool_calls[0] + assert tool_call.function.name == "get_weather" + args = json.loads(tool_call.function.arguments) + assert "city" in args + + def test_json_response_format(self, gcp_client): + """Test JSON response format returns valid JSON.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."} + ], + response_format={"type": "json_object"}, + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + parsed = json.loads(content) + assert isinstance(parsed, dict) + + +class TestGCPChatStream: + """Test streaming chat completion.""" + + def test_basic_stream(self, gcp_client): + """Test streaming returns chunks with content.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": 
"Say 'hello' and nothing else."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + def test_stream_with_max_tokens(self, gcp_client): + """Test streaming respects max_tokens truncation.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Find finish_reason in any chunk + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] in ("length", "stop") + + def test_stream_finish_reason(self, gcp_client): + """Test that the last chunk has a finish_reason.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hi'."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # The final chunk(s) should contain a finish_reason + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] == "stop" + + def test_stream_tool_call(self, gcp_client): + """Test tool call via streaming, collecting tool_call delta chunks.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Collect tool call information from delta chunks + tool_call_found = False + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.tool_calls: + tool_call_found = True + break + + 
assert tool_call_found, "Expected tool_call delta chunks in stream" + + +class TestGCPChatCompleteAsync: + """Test async chat completion.""" + + @pytest.mark.asyncio + async def test_basic_completion_async(self, gcp_client): + """Test async chat completion returns a response.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_completion_with_system_message_async(self, gcp_client): + """Test async chat completion with system + user message.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Say 'hello'."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_tool_call_async(self, gcp_client): + """Test async tool call returns tool_calls.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + assert choice.message.tool_calls[0].function.name == "get_weather" + + +class TestGCPChatStreamAsync: + """Test async streaming chat completion.""" + + @pytest.mark.asyncio + async def test_basic_stream_async(self, gcp_client): + """Test async streaming returns chunks with content.""" + stream = await gcp_client.chat.stream_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + content = "" + async for chunk in stream: + if chunk.data.choices and 
chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + +class TestGCPContextManager: + """Test context manager support.""" + + def test_sync_context_manager(self): + """Test that MistralGCP works as a sync context manager.""" + from mistralai.gcp.client import MistralGCP + with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_async_context_manager(self): + """Test that MistralGCP works as an async context manager.""" + from mistralai.gcp.client import MistralGCP + async with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = await client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'async context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + +class TestGCPFIM: + """Test FIM (Fill-in-the-middle) completion.""" + + def _make_fim_client(self): + """Create a GCP client configured for FIM model.""" + from mistralai.gcp.client import MistralGCP + return MistralGCP(project_id=GCP_PROJECT_ID, region=GCP_REGION) + + def test_fim_complete(self): + """Test FIM completion returns a response.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + def test_fim_stream(self): + """Test FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = client.fim.stream( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + 
chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 + + def test_fim_with_max_tokens(self): + """Test FIM completion with max_tokens.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def add(a, b):", + suffix=" return result", + max_tokens=10, + timeout_ms=10000, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + @pytest.mark.asyncio + async def test_fim_complete_async(self): + """Test async FIM completion returns a response.""" + client = self._make_fim_client() + res = await client.fim.complete_async( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_fim_stream_async(self): + """Test async FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = await client.fim.stream_async( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + chunks = [] + async for chunk in stream: + chunks.append(chunk) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 diff --git a/tests/test_gcp_v2_parity.py b/tests/test_gcp_v2_parity.py new file mode 100644 index 00000000..0d6471e4 --- /dev/null +++ b/tests/test_gcp_v2_parity.py @@ -0,0 +1,330 @@ +""" +Parity tests for the GCP v2 SDK. 
+ +Verifies that the regenerated mistralai.gcp package exposes +the same public API surface as the v1 mistralai_gcp package. +Uses introspection only — no API calls or credentials required. +""" +import inspect + +import pytest + +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.chat import Chat +from mistralai.gcp.client.fim import Fim +from mistralai.gcp.client.types import UNSET + +GCP_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "fim": {"complete", "stream"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("project_id", None), + ("region", "europe-west4"), + ("access_token", None), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), +] + +CHAT_COMPLETE_PARAMS = [ + ("model", _EMPTY), + ("messages", _EMPTY), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + ("prompt_mode", UNSET), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + +FIM_COMPLETE_PARAMS = 
[ + ("model", _EMPTY), + ("prompt", _EMPTY), + ("temperature", UNSET), + ("top_p", 1), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("suffix", UNSET), + ("min_tokens", UNSET), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +FIM_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in FIM_COMPLETE_PARAMS +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestGCPSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralGCP.__annotations__ + + def test_sdk_has_fim(self): + assert "fim" in MistralGCP.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralGCP.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralGCP, method), f"MistralGCP missing {method}" + + +class TestGCPChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, "complete_async") + mark_tested("chat", "complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete params -- + 
@pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) + assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): 
+ sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"} + async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"} + async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def test_complete_model_required(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["model"].default is _EMPTY + + def test_stream_model_required(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["model"].default is _EMPTY + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["stream"].default is True + + +class TestGCPFim: + def test_has_complete(self): + assert hasattr(Fim, "complete") + mark_tested("fim", "complete") + + def test_has_complete_async(self): + assert hasattr(Fim, "complete_async") + mark_tested("fim", "complete_async") + + def test_has_stream(self): + assert hasattr(Fim, "stream") + mark_tested("fim", "stream") + + def test_has_stream_async(self): + assert hasattr(Fim, "stream_async") + mark_tested("fim", "stream_async") + + # -- complete params -- + @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.complete) + assert param_name in sig.parameters, f"Fim.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + 
@pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.stream) + assert param_name in sig.parameters, f"Fim.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.complete_async) + assert param_name in sig.parameters, f"Fim.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.stream_async) + assert param_name in sig.parameters, f"Fim.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): + sync_params = set(inspect.signature(Fim.complete).parameters) - {"self"} + async_params = set(inspect.signature(Fim.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Fim.stream).parameters) - {"self"} + async_params = set(inspect.signature(Fim.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def 
test_complete_model_required(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["model"].default is _EMPTY + + def test_stream_model_required(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["model"].default is _EMPTY + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["stream"].default is True + + def test_complete_top_p_defaults_to_1(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["top_p"].default == 1 + + def test_stream_top_p_defaults_to_1(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["top_p"].default == 1 + + +class TestGCPCoverage: + def test_all_methods_tested(self): + expected = set() + for resource, methods in GCP_METHODS.items(): + for method in methods: + expected.add(f"{resource}.{method}") + expected.add(f"{resource}.{method}_async") + untested = expected - TESTED_METHODS + assert not untested, f"Untested methods: {untested}" + + def test_no_unexpected_public_methods_on_chat(self): + public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Chat methods: {unexpected}" + + def test_no_unexpected_public_methods_on_fim(self): + public = {m for m in dir(Fim) if not m.startswith("_") and callable(getattr(Fim, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Fim methods: {unexpected}" diff --git a/tests/test_prepare_readme.py b/tests/test_prepare_readme.py new file mode 100644 index 00000000..ce3e11c9 --- /dev/null +++ 
import importlib.util
from pathlib import Path

# Load scripts/prepare_readme.py as a module even though it is not a package.
SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "prepare_readme.py"
SPEC = importlib.util.spec_from_file_location("prepare_readme", SCRIPT_PATH)
if SPEC is None or SPEC.loader is None:
    raise ImportError(f"Unable to load prepare_readme from {SCRIPT_PATH}")
prepare_readme = importlib.util.module_from_spec(SPEC)
SPEC.loader.exec_module(prepare_readme)


def test_rewrite_relative_links_keeps_absolute() -> None:
    """Relative links get the base-URL prefix; absolute links are untouched."""
    root = "https://example.com/blob/main/"
    source = "[Migration](MIGRATION.md)\n[Docs](https://docs.mistral.ai)"
    rewritten = prepare_readme.rewrite_relative_links(source, root)
    assert rewritten == (
        "[Migration](https://example.com/blob/main/MIGRATION.md)\n"
        "[Docs](https://docs.mistral.ai)"
    )


def test_main_prints_rewritten_readme_with_defaults(tmp_path, capsys) -> None:
    """main() rewrites relative links using the default repo URL and branch."""
    readme_path = tmp_path / "README.md"
    readme_path.write_text("[Migration](MIGRATION.md)\n", encoding="utf-8")
    base_url = prepare_readme.build_base_url(
        prepare_readme.DEFAULT_REPO_URL,
        prepare_readme.DEFAULT_BRANCH,
        "",
    )

    exit_code = prepare_readme.main(["--readme", str(readme_path)])

    captured = capsys.readouterr()
    assert exit_code == 0
    assert captured.out == f"[Migration]({base_url}MIGRATION.md)\n"
"""
This script:
- pins the OpenAPI specs,
- runs speakeasy to update the SDKs' files,
- and then unpins the OpenAPI specs.

It is advised to often run this script to avoid getting unrelated changes (due to updates) when modifying the OpenAPI specs.
"""

import copy
import subprocess
from enum import Enum
from io import TextIOWrapper

import yaml

WORKFLOW_PATH = ".speakeasy/workflow.yaml"
WORKFLOW_LOCK_PATH = ".speakeasy/workflow.lock"


def set_location_rev(yaml_content: dict, source_name: str, new_rev: str) -> None:
    """Pin *source_name*'s first input location to *new_rev*, keeping the registry prefix."""
    location = yaml_content["sources"][source_name]["inputs"][0]["location"]
    registry = location.split(":")[0]
    yaml_content["sources"][source_name]["inputs"][0]["location"] = f"{registry}:{new_rev}"


def write_yaml(yaml_content: dict, file: TextIOWrapper) -> None:
    """Dump *yaml_content* to *file*, preserving key order (no alphabetical sort)."""
    yaml.dump(yaml_content, file, default_flow_style=False, sort_keys=False, indent=4)


def pin_speakeasy_version(workflow_path: str, version: str) -> None:
    """Rewrite the workflow file so ``speakeasyVersion`` is pinned to *version*."""
    with open(workflow_path, "r") as file:
        workflow_yaml = yaml.safe_load(file)
    workflow_yaml["speakeasyVersion"] = version
    with open(workflow_path, "w") as file:
        write_yaml(workflow_yaml, file)


class OpenAPISpecsPinned:
    """Context manager that pins the OpenAPI specs on entry and restores the
    original (unpinned) workflow file on exit — even if the body raises."""

    def __init__(self, workflow_path: str, workflow_lock_path: str):
        self.workflow_path = workflow_path
        self.workflow_lock_path = workflow_lock_path
        # Keep the pristine workflow in memory so __exit__ can restore it verbatim.
        with open(workflow_path, "r") as file:
            self.workflow_yaml = yaml.safe_load(file)

    def __enter__(self) -> "OpenAPISpecsPinned":
        # Pin first, announce after — the message should reflect reality.
        self.pin_to_current_rev()
        print("OpenAPI specs pinned to current revision")
        # Returning self allows `with OpenAPISpecsPinned(...) as pinned:` usage.
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        self.unpin()
        print("OpenAPI specs unpinned")

    def pin_to_current_rev(self) -> None:
        """Pin every source to the revision digest recorded in the lock file."""
        yaml_copy = copy.deepcopy(self.workflow_yaml)
        # Current revisions of the OpenAPI specs, as recorded by the last run.
        with open(self.workflow_lock_path, "r") as lock_file:
            yaml_lock = yaml.safe_load(lock_file)
        revisions = {
            source: yaml_lock["sources"][source]["sourceRevisionDigest"]
            for source in ("mistral-azure-source", "mistral-google-cloud-source", "mistral-openapi")
        }

        # Write the workflow back with each source pinned to its digest.
        with open(self.workflow_path, "w") as file:
            for source, rev in revisions.items():
                set_location_rev(yaml_copy, source, rev)
            write_yaml(yaml_content=yaml_copy, file=file)

    def unpin(self) -> None:
        """Restore the workflow file captured at construction time."""
        with open(self.workflow_path, "w") as file:
            write_yaml(yaml_content=self.workflow_yaml, file=file)


class SpeakeasyTargets(str, Enum):
    """
    The list of targets defined in the .speakeasy/workflow.yaml[.targets] section.
    This can also be listed running `speakeasy list targets` in the root of the project.
    """
    ALL = "all"
    MISTRALAI_SDK = "mistralai-sdk"
    MISTRALAI_AZURE_SDK = "mistralai-azure-sdk"
    MISTRALAI_GCP_SDK = "mistralai-gcp-sdk"

    @classmethod
    def list(cls):
        """Return every target name as a plain string."""
        return [target.value for target in cls]


if __name__ == "__main__":
    pin_speakeasy_version(workflow_path=WORKFLOW_PATH, version="1.580.2")
    with OpenAPISpecsPinned(WORKFLOW_PATH, WORKFLOW_LOCK_PATH):
        # check=True: a failing speakeasy run now raises CalledProcessError
        # instead of being silently ignored; the context manager still
        # unpins the specs on the way out.
        subprocess.run(
            [
                "speakeasy",
                "run",
                "-t",
                SpeakeasyTargets.MISTRALAI_SDK.value,
                "--skip-versioning",
                "--verbose",
            ],
            check=True,
        )
"https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "authlib" +version = "1.6.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/9b/b1661026ff24bc641b76b78c5222d614776b0c085bcfdac9bd15a1cb4b35/authlib-1.6.6.tar.gz", hash = "sha256:45770e8e056d0f283451d9996fbb59b70d45722b45d854d58f32878d0a40c38e", size = 164894, upload-time = "2025-12-12T08:01:41.464Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/51/321e821856452f7386c4e9df866f196720b1ad0c5ea1623ea7399969ae3b/authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd", size = 244005, upload-time = 
"2025-12-12T08:01:40.209Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" }, + { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" }, + { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" }, + { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" }, + { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" }, + { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" }, + { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" }, + { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, + { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, + { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = 
"sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = 
"https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = 
"2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url 
= "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = 
"sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = 
"sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" }, + { url = "https://files.pythonhosted.org/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, upload-time = "2025-10-14T04:40:14.728Z" }, + { url = "https://files.pythonhosted.org/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" }, + { url = "https://files.pythonhosted.org/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" }, + { url = "https://files.pythonhosted.org/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, upload-time = "2025-10-14T04:40:19.08Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = 
"2025-10-14T04:40:20.607Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = "2025-10-14T04:40:21.719Z" }, + { url = "https://files.pythonhosted.org/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" }, + { url = "https://files.pythonhosted.org/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" }, + { url = "https://files.pythonhosted.org/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" }, + { url = "https://files.pythonhosted.org/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" }, + { url = "https://files.pythonhosted.org/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = "2025-10-14T04:40:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" }, + { url = "https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, + { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, 
upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = "2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = "https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = "https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = "https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = "https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = "https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" 
}, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, + { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, + { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, + { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, + { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, + { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, + { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, + { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, + { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, + { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = 
"sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, + { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, + { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, + { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, + { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, + { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = 
"2025-10-15T23:17:28.06Z" }, + { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, + { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, + { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, + { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, + { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, + { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, + { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, + { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, + { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, + { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, + { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, + { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, + { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, + { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, + { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, + { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, + { url = "https://files.pythonhosted.org/packages/d9/cd/1a8633802d766a0fa46f382a77e096d7e209e0817892929655fe0586ae32/cryptography-46.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32", size = 3689163, upload-time = "2025-10-15T23:18:13.821Z" }, + { url = "https://files.pythonhosted.org/packages/4c/59/6b26512964ace6480c3e54681a9859c974172fb141c38df11eadd8416947/cryptography-46.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c", size = 3429474, upload-time = "2025-10-15T23:18:15.477Z" }, + { url = "https://files.pythonhosted.org/packages/06/8a/e60e46adab4362a682cf142c7dcb5bf79b782ab2199b0dcb81f55970807f/cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea", size = 3698132, upload-time = "2025-10-15T23:18:17.056Z" }, + { url = "https://files.pythonhosted.org/packages/da/38/f59940ec4ee91e93d3311f7532671a5cef5570eb04a144bf203b58552d11/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b", size = 4243992, upload-time = "2025-10-15T23:18:18.695Z" }, + { url = "https://files.pythonhosted.org/packages/b0/0c/35b3d92ddebfdfda76bb485738306545817253d0a3ded0bfe80ef8e67aa5/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb", size = 4409944, upload-time = "2025-10-15T23:18:20.597Z" }, + { url = "https://files.pythonhosted.org/packages/99/55/181022996c4063fc0e7666a47049a1ca705abb9c8a13830f074edb347495/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717", size = 4242957, upload-time = "2025-10-15T23:18:22.18Z" }, + { url = "https://files.pythonhosted.org/packages/ba/af/72cd6ef29f9c5f731251acadaeb821559fe25f10852f44a63374c9ca08c1/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9", size = 4409447, upload-time = "2025-10-15T23:18:24.209Z" }, + { url = 
"https://files.pythonhosted.org/packages/0d/c3/e90f4a4feae6410f914f8ebac129b9ae7a8c92eb60a638012dde42030a9d/cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c", size = 3438528, upload-time = "2025-10-15T23:18:26.227Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "eval-type-backport" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = 
"2025-12-02T11:51:41.665Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "google-auth" +version = "2.45.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = 
{ registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + +[[package]] +name = "griffe" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0d/0c/3a471b6e31951dce2360477420d0a8d1e00dea6cf33b70f3e8c3ab6e28e1/griffe-1.15.0.tar.gz", hash = "sha256:7726e3afd6f298fbc3696e67958803e7ac843c1cfe59734b6251a40cdbfb5eea", size = 424112, upload-time = "2025-11-10T15:03:15.52Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/83/3b1d03d36f224edded98e9affd0467630fc09d766c0e56fb1498cbb04a9b/griffe-1.15.0-py3-none-any.whl", hash = "sha256:6f6762661949411031f5fcda9593f586e6ce8340f0ba88921a0f2ef7a81eb9a3", size = 150705, upload-time = "2025-11-10T15:03:13.549Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", 
hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = 
"sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "invoke" +version = "2.2.1" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/bd/b461d3424a24c80490313fd77feeb666ca4f6a28c7e72713e3d9095719b4/invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707", size = 304762, upload-time = "2025-10-11T00:36:35.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, +] + +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = 
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mcp" +version = "1.25.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "pyjwt", extra = ["crypto"] }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/fc/6dc7659c2ae5ddf280477011f4213a74f806862856b796ef08f028e664bf/mcp-1.25.0-py3-none-any.whl", hash = "sha256:b37c38144a666add0862614cc79ec276e97d72aa8ca26d622818d4e278b9721a", size = 233076, upload-time = "2025-12-19T10:19:55.416Z" }, +] + +[[package]] +name = "mistralai" +version = "2.0.0rc1" +source = { editable = "." 
} +dependencies = [ + { name = "eval-type-backport" }, + { name = "httpx" }, + { name = "invoke" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "pydantic" }, + { name = "python-dateutil" }, + { name = "pyyaml" }, + { name = "typing-inspection" }, +] + +[package.optional-dependencies] +agents = [ + { name = "authlib" }, + { name = "griffe" }, + { name = "mcp" }, +] +gcp = [ + { name = "google-auth" }, + { name = "requests" }, +] +realtime = [ + { name = "websockets" }, +] + +[package.dev-dependencies] +dev = [ + { name = "authlib" }, + { name = "griffe" }, + { name = "mcp" }, + { name = "mypy" }, + { name = "pylint" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "types-authlib" }, + { name = "types-python-dateutil" }, + { name = "types-pyyaml" }, + { name = "websockets" }, +] +lint = [ + { name = "mypy" }, + { name = "pyright" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "authlib", marker = "extra == 'agents'", specifier = ">=1.5.2,<2.0" }, + { name = "eval-type-backport", specifier = ">=0.2.0" }, + { name = "google-auth", marker = "extra == 'gcp'", specifier = ">=2.27.0" }, + { name = "griffe", marker = "extra == 'agents'", specifier = ">=1.7.3,<2.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "invoke", specifier = ">=2.2.0,<3.0.0" }, + { name = "mcp", marker = "extra == 'agents'", specifier = ">=1.0,<2.0" }, + { name = "opentelemetry-api", specifier = ">=1.33.1,<2.0.0" }, + { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, + { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, + { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.61" }, + { name = "pydantic", specifier = ">=2.11.2" }, + { name = "python-dateutil", specifier = ">=2.8.2" }, + { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, + 
{ name = "requests", marker = "extra == 'gcp'", specifier = ">=2.32.3" }, + { name = "typing-inspection", specifier = ">=0.4.0" }, + { name = "websockets", marker = "extra == 'realtime'", specifier = ">=13.0" }, +] +provides-extras = ["gcp", "agents", "realtime"] + +[package.metadata.requires-dev] +dev = [ + { name = "authlib", specifier = ">=1.5.2,<2" }, + { name = "griffe", specifier = ">=1.7.3,<2" }, + { name = "mcp", specifier = ">=1.0,<2" }, + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, + { name = "types-authlib", specifier = ">=1.5.0.20250516,<2" }, + { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, + { name = "types-pyyaml", specifier = ">=6.0.12.20250516,<7" }, + { name = "websockets", specifier = ">=13.0" }, +] +lint = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, + { name = "ruff", specifier = ">=0.11.10,<0.12" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", 
size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = 
"sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/83/dd4660f2956ff88ed071e9e0e36e830df14b8c5dc06722dbde1841accbe8/opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c", size = 20431, upload-time = "2025-10-16T08:35:53.285Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/9e/55a41c9601191e8cd8eb626b54ee6827b9c9d4a46d736f32abc80d8039fc/opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a", size = 18359, upload-time = "2025-10-16T08:35:34.099Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size = 17282, upload-time = "2025-10-16T08:35:54.422Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e5/77/154004c99fb9f291f74aa0822a2f5bbf565a72d8126b3a1b63ed8e5f83c7/opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b", size = 19579, upload-time = "2025-10-16T08:35:36.269Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.59b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = "2025-10-16T08:36:03.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + 
+[[package]] +name = "protobuf" +version = "6.33.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" }, + { url = "https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" }, + { url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, 
upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = 
"sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = 
"2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", 
size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = 
"colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.407" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/1b/0aa08ee42948b61745ac5b5b5ccaec4669e8884b53d31c8ec20b2fcd6b6f/pyright-1.1.407.tar.gz", hash = "sha256:099674dba5c10489832d4a4b2d302636152a9a42d317986c38474c76fe562262", size = 4122872, upload-time = "2025-10-24T23:17:15.145Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/93/b69052907d032b00c40cb656d21438ec00b3a471733de137a3f65a49a0a0/pyright-1.1.407-py3-none-any.whl", hash = "sha256:6dd419f54fcc13f03b52285796d65e639786373f433e243f8b94cf93a7444d21", size = 5997008, upload-time = "2025-10-24T23:17:13.159Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { 
name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.21" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/78/96/804520d0850c7db98e5ccb70282e29208723f0964e88ffd9d0da2f52ea09/python_multipart-0.0.21.tar.gz", hash = "sha256:7137ebd4d3bbf70ea1622998f902b97a29434a9e8dc40eb203bbcf7c2a2cba92", size = 37196, upload-time = "2025-12-17T09:24:22.446Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/76/03af049af4dcee5d27442f71b6924f01f3efb5d2bd34f23fcd563f2cc5f5/python_multipart-0.0.21-py3-none-any.whl", hash = "sha256:cf7a6713e01c87aa35387f4774e812c4361150938d20d232800f75ffcf266090", size = 24541, upload-time = "2025-12-17T09:24:21.153Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/40/44efbb0dfbd33aca6a6483191dae0716070ed99e2ecb0c53683f400a0b4f/pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3", size = 8760432, upload-time = "2025-07-14T20:13:05.9Z" }, + { url = "https://files.pythonhosted.org/packages/5e/bf/360243b1e953bd254a82f12653974be395ba880e7ec23e3731d9f73921cc/pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b", size = 9590103, upload-time = "2025-07-14T20:13:07.698Z" }, + { url = "https://files.pythonhosted.org/packages/57/38/d290720e6f138086fb3d5ffe0b6caa019a791dd57866940c82e4eeaf2012/pywin32-311-cp310-cp310-win_arm64.whl", hash = "sha256:0502d1facf1fed4839a9a51ccbcc63d952cf318f78ffc00a7e78528ac27d7a2b", size = 8778557, upload-time = "2025-07-14T20:13:11.11Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/af/449a6a91e5d6db51420875c54f6aff7c97a86a3b13a0b4f1a5c13b988de3/pywin32-311-cp311-cp311-win32.whl", hash = "sha256:184eb5e436dea364dcd3d2316d577d625c0351bf237c4e9a5fabbcfa5a58b151", size = 8697031, upload-time = "2025-07-14T20:13:13.266Z" }, + { url = "https://files.pythonhosted.org/packages/51/8f/9bb81dd5bb77d22243d33c8397f09377056d5c687aa6d4042bea7fbf8364/pywin32-311-cp311-cp311-win_amd64.whl", hash = "sha256:3ce80b34b22b17ccbd937a6e78e7225d80c52f5ab9940fe0506a1a16f3dab503", size = 9508308, upload-time = "2025-07-14T20:13:15.147Z" }, + { url = "https://files.pythonhosted.org/packages/44/7b/9c2ab54f74a138c491aba1b1cd0795ba61f144c711daea84a88b63dc0f6c/pywin32-311-cp311-cp311-win_arm64.whl", hash = "sha256:a733f1388e1a842abb67ffa8e7aad0e70ac519e09b0f6a784e65a136ec7cefd2", size = 8703930, upload-time = "2025-07-14T20:13:16.945Z" }, + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, 
upload-time = "2025-07-14T20:13:24.682Z" }, + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = 
"sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/a0/39350dd17dd6d6c6507025c0e53aef67a9293a6d37d3511f23ea510d5800/pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b", size = 184227, upload-time = "2025-09-25T21:31:46.04Z" }, + { url = "https://files.pythonhosted.org/packages/05/14/52d505b5c59ce73244f59c7a50ecf47093ce4765f116cdb98286a71eeca2/pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956", size = 174019, upload-time = "2025-09-25T21:31:47.706Z" }, + { url = "https://files.pythonhosted.org/packages/43/f7/0e6a5ae5599c838c696adb4e6330a59f463265bfa1e116cfd1fbb0abaaae/pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8", size = 740646, upload-time = "2025-09-25T21:31:49.21Z" }, + { url = "https://files.pythonhosted.org/packages/2f/3a/61b9db1d28f00f8fd0ae760459a5c4bf1b941baf714e207b6eb0657d2578/pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198", size = 840793, upload-time = "2025-09-25T21:31:50.735Z" }, + { url = "https://files.pythonhosted.org/packages/7a/1e/7acc4f0e74c4b3d9531e24739e0ab832a5edf40e64fbae1a9c01941cabd7/pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b", size = 770293, upload-time = "2025-09-25T21:31:51.828Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ef/abd085f06853af0cd59fa5f913d61a8eab65d7639ff2a658d18a25d6a89d/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0", size = 732872, upload-time = "2025-09-25T21:31:53.282Z" }, + { url = "https://files.pythonhosted.org/packages/1f/15/2bc9c8faf6450a8b3c9fc5448ed869c599c0a74ba2669772b1f3a0040180/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69", size = 758828, upload-time = "2025-09-25T21:31:54.807Z" }, + { url = "https://files.pythonhosted.org/packages/a3/00/531e92e88c00f4333ce359e50c19b8d1de9fe8d581b1534e35ccfbc5f393/pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e", size = 142415, upload-time = "2025-09-25T21:31:55.885Z" }, + { url = "https://files.pythonhosted.org/packages/2a/fa/926c003379b19fca39dd4634818b00dec6c62d87faf628d1394e137354d4/pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c", size = 158561, upload-time = "2025-09-25T21:31:57.406Z" }, + { url = 
"https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" }, + { url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" }, + { url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" }, + { url = "https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" }, + { url = "https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" }, + { url = "https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" }, + { url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" }, + { url = "https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" }, + { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, + { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, 
upload-time = "2025-09-25T21:32:18.834Z" }, + { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = 
"rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/0c/0c411a0ec64ccb6d104dcabe0e713e05e153a9a2c3c2bd2b32ce412166fe/rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288", size = 370490, upload-time = "2025-11-30T20:21:33.256Z" }, + { url = "https://files.pythonhosted.org/packages/19/6a/4ba3d0fb7297ebae71171822554abe48d7cab29c28b8f9f2c04b79988c05/rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00", size = 359751, upload-time = "2025-11-30T20:21:34.591Z" }, + { url = "https://files.pythonhosted.org/packages/cd/7c/e4933565ef7f7a0818985d87c15d9d273f1a649afa6a52ea35ad011195ea/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6", size = 389696, upload-time = "2025-11-30T20:21:36.122Z" }, + { url = "https://files.pythonhosted.org/packages/5e/01/6271a2511ad0815f00f7ed4390cf2567bec1d4b1da39e2c27a41e6e3b4de/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7", size = 403136, upload-time = "2025-11-30T20:21:37.728Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/64/c857eb7cd7541e9b4eee9d49c196e833128a55b89a9850a9c9ac33ccf897/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324", size = 524699, upload-time = "2025-11-30T20:21:38.92Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ed/94816543404078af9ab26159c44f9e98e20fe47e2126d5d32c9d9948d10a/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df", size = 412022, upload-time = "2025-11-30T20:21:40.407Z" }, + { url = "https://files.pythonhosted.org/packages/61/b5/707f6cf0066a6412aacc11d17920ea2e19e5b2f04081c64526eb35b5c6e7/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3", size = 390522, upload-time = "2025-11-30T20:21:42.17Z" }, + { url = "https://files.pythonhosted.org/packages/13/4e/57a85fda37a229ff4226f8cbcf09f2a455d1ed20e802ce5b2b4a7f5ed053/rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221", size = 404579, upload-time = "2025-11-30T20:21:43.769Z" }, + { url = "https://files.pythonhosted.org/packages/f9/da/c9339293513ec680a721e0e16bf2bac3db6e5d7e922488de471308349bba/rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7", size = 421305, upload-time = "2025-11-30T20:21:44.994Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/be/522cb84751114f4ad9d822ff5a1aa3c98006341895d5f084779b99596e5c/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff", size = 572503, upload-time = "2025-11-30T20:21:46.91Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9b/de879f7e7ceddc973ea6e4629e9b380213a6938a249e94b0cdbcc325bb66/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7", size = 598322, upload-time = "2025-11-30T20:21:48.709Z" }, + { url = "https://files.pythonhosted.org/packages/48/ac/f01fc22efec3f37d8a914fc1b2fb9bcafd56a299edbe96406f3053edea5a/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139", size = 560792, upload-time = "2025-11-30T20:21:50.024Z" }, + { url = "https://files.pythonhosted.org/packages/e2/da/4e2b19d0f131f35b6146425f846563d0ce036763e38913d917187307a671/rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464", size = 221901, upload-time = "2025-11-30T20:21:51.32Z" }, + { url = "https://files.pythonhosted.org/packages/96/cb/156d7a5cf4f78a7cc571465d8aec7a3c447c94f6749c5123f08438bcf7bc/rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169", size = 235823, upload-time = "2025-11-30T20:21:52.505Z" }, + { url = "https://files.pythonhosted.org/packages/4d/6e/f964e88b3d2abee2a82c1ac8366da848fce1c6d834dc2132c3fda3970290/rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425", size = 370157, upload-time = "2025-11-30T20:21:53.789Z" }, + { url = "https://files.pythonhosted.org/packages/94/ba/24e5ebb7c1c82e74c4e4f33b2112a5573ddc703915b13a073737b59b86e0/rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d", size = 359676, upload-time = "2025-11-30T20:21:55.475Z" }, + { url = "https://files.pythonhosted.org/packages/84/86/04dbba1b087227747d64d80c3b74df946b986c57af0a9f0c98726d4d7a3b/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4", size = 389938, upload-time = "2025-11-30T20:21:57.079Z" }, + { url = "https://files.pythonhosted.org/packages/42/bb/1463f0b1722b7f45431bdd468301991d1328b16cffe0b1c2918eba2c4eee/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f", size = 402932, upload-time = "2025-11-30T20:21:58.47Z" }, + { url = "https://files.pythonhosted.org/packages/99/ee/2520700a5c1f2d76631f948b0736cdf9b0acb25abd0ca8e889b5c62ac2e3/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4", size = 525830, upload-time = "2025-11-30T20:21:59.699Z" }, + { url = "https://files.pythonhosted.org/packages/e0/ad/bd0331f740f5705cc555a5e17fdf334671262160270962e69a2bdef3bf76/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97", size = 412033, upload-time = "2025-11-30T20:22:00.991Z" }, 
+ { url = "https://files.pythonhosted.org/packages/f8/1e/372195d326549bb51f0ba0f2ecb9874579906b97e08880e7a65c3bef1a99/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89", size = 390828, upload-time = "2025-11-30T20:22:02.723Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/d88bb33294e3e0c76bc8f351a3721212713629ffca1700fa94979cb3eae8/rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d", size = 404683, upload-time = "2025-11-30T20:22:04.367Z" }, + { url = "https://files.pythonhosted.org/packages/50/32/c759a8d42bcb5289c1fac697cd92f6fe01a018dd937e62ae77e0e7f15702/rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038", size = 421583, upload-time = "2025-11-30T20:22:05.814Z" }, + { url = "https://files.pythonhosted.org/packages/2b/81/e729761dbd55ddf5d84ec4ff1f47857f4374b0f19bdabfcf929164da3e24/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7", size = 572496, upload-time = "2025-11-30T20:22:07.713Z" }, + { url = "https://files.pythonhosted.org/packages/14/f6/69066a924c3557c9c30baa6ec3a0aa07526305684c6f86c696b08860726c/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed", size = 598669, upload-time = "2025-11-30T20:22:09.312Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/48/905896b1eb8a05630d20333d1d8ffd162394127b74ce0b0784ae04498d32/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85", size = 561011, upload-time = "2025-11-30T20:22:11.309Z" }, + { url = "https://files.pythonhosted.org/packages/22/16/cd3027c7e279d22e5eb431dd3c0fbc677bed58797fe7581e148f3f68818b/rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c", size = 221406, upload-time = "2025-11-30T20:22:13.101Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5b/e7b7aa136f28462b344e652ee010d4de26ee9fd16f1bfd5811f5153ccf89/rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825", size = 236024, upload-time = "2025-11-30T20:22:14.853Z" }, + { url = "https://files.pythonhosted.org/packages/14/a6/364bba985e4c13658edb156640608f2c9e1d3ea3c81b27aa9d889fff0e31/rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229", size = 229069, upload-time = "2025-11-30T20:22:16.577Z" }, + { url = "https://files.pythonhosted.org/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, + { url = "https://files.pythonhosted.org/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, + { url = "https://files.pythonhosted.org/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, + { url = "https://files.pythonhosted.org/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, + { url = "https://files.pythonhosted.org/packages/63/8e/0cfa7ae158e15e143fe03993b5bcd743a59f541f5952e1546b1ac1b5fd45/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1", size = 414492, upload-time = "2025-11-30T20:22:26.505Z" }, + { url = "https://files.pythonhosted.org/packages/60/1b/6f8f29f3f995c7ffdde46a626ddccd7c63aefc0efae881dc13b6e5d5bb16/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23", size = 394080, upload-time = 
"2025-11-30T20:22:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d5/a266341051a7a3ca2f4b750a3aa4abc986378431fc2da508c5034d081b70/rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6", size = 408680, upload-time = "2025-11-30T20:22:29.341Z" }, + { url = "https://files.pythonhosted.org/packages/10/3b/71b725851df9ab7a7a4e33cf36d241933da66040d195a84781f49c50490c/rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51", size = 423589, upload-time = "2025-11-30T20:22:31.469Z" }, + { url = "https://files.pythonhosted.org/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, + { url = "https://files.pythonhosted.org/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://files.pythonhosted.org/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, + { url = "https://files.pythonhosted.org/packages/ed/dc/d61221eb88ff410de3c49143407f6f3147acf2538c86f2ab7ce65ae7d5f9/rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2", size = 374887, upload-time = "2025-11-30T20:22:41.812Z" }, + { url = "https://files.pythonhosted.org/packages/fd/32/55fb50ae104061dbc564ef15cc43c013dc4a9f4527a1f4d99baddf56fe5f/rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8", size = 358904, upload-time = "2025-11-30T20:22:43.479Z" }, + { url = "https://files.pythonhosted.org/packages/58/70/faed8186300e3b9bdd138d0273109784eea2396c68458ed580f885dfe7ad/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4", size = 389945, upload-time = "2025-11-30T20:22:44.819Z" }, + { url = "https://files.pythonhosted.org/packages/bd/a8/073cac3ed2c6387df38f71296d002ab43496a96b92c823e76f46b8af0543/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136", size = 407783, upload-time = "2025-11-30T20:22:46.103Z" }, + { url = "https://files.pythonhosted.org/packages/77/57/5999eb8c58671f1c11eba084115e77a8899d6e694d2a18f69f0ba471ec8b/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7", size = 515021, upload-time = "2025-11-30T20:22:47.458Z" }, + { url = "https://files.pythonhosted.org/packages/e0/af/5ab4833eadc36c0a8ed2bc5c0de0493c04f6c06de223170bd0798ff98ced/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2", size = 414589, upload-time = "2025-11-30T20:22:48.872Z" }, + { url = "https://files.pythonhosted.org/packages/b7/de/f7192e12b21b9e9a68a6d0f249b4af3fdcdff8418be0767a627564afa1f1/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6", size = 394025, upload-time = "2025-11-30T20:22:50.196Z" }, + { url = "https://files.pythonhosted.org/packages/91/c4/fc70cd0249496493500e7cc2de87504f5aa6509de1e88623431fec76d4b6/rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e", size = 408895, upload-time = "2025-11-30T20:22:51.87Z" 
}, + { url = "https://files.pythonhosted.org/packages/58/95/d9275b05ab96556fefff73a385813eb66032e4c99f411d0795372d9abcea/rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d", size = 422799, upload-time = "2025-11-30T20:22:53.341Z" }, + { url = "https://files.pythonhosted.org/packages/06/c1/3088fc04b6624eb12a57eb814f0d4997a44b0d208d6cace713033ff1a6ba/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7", size = 572731, upload-time = "2025-11-30T20:22:54.778Z" }, + { url = "https://files.pythonhosted.org/packages/d8/42/c612a833183b39774e8ac8fecae81263a68b9583ee343db33ab571a7ce55/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31", size = 599027, upload-time = "2025-11-30T20:22:56.212Z" }, + { url = "https://files.pythonhosted.org/packages/5f/60/525a50f45b01d70005403ae0e25f43c0384369ad24ffe46e8d9068b50086/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95", size = 563020, upload-time = "2025-11-30T20:22:58.2Z" }, + { url = "https://files.pythonhosted.org/packages/0b/5d/47c4655e9bcd5ca907148535c10e7d489044243cc9941c16ed7cd53be91d/rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d", size = 223139, upload-time = "2025-11-30T20:23:00.209Z" }, + { url = "https://files.pythonhosted.org/packages/f2/e1/485132437d20aa4d3e1d8b3fb5a5e65aa8139f1e097080c2a8443201742c/rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15", size = 240224, upload-time = "2025-11-30T20:23:02.008Z" }, + { url = "https://files.pythonhosted.org/packages/24/95/ffd128ed1146a153d928617b0ef673960130be0009c77d8fbf0abe306713/rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1", size = 230645, upload-time = "2025-11-30T20:23:03.43Z" }, + { url = "https://files.pythonhosted.org/packages/ff/1b/b10de890a0def2a319a2626334a7f0ae388215eb60914dbac8a3bae54435/rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a", size = 364443, upload-time = "2025-11-30T20:23:04.878Z" }, + { url = "https://files.pythonhosted.org/packages/0d/bf/27e39f5971dc4f305a4fb9c672ca06f290f7c4e261c568f3dea16a410d47/rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e", size = 353375, upload-time = "2025-11-30T20:23:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/40/58/442ada3bba6e8e6615fc00483135c14a7538d2ffac30e2d933ccf6852232/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000", size = 383850, upload-time = "2025-11-30T20:23:07.825Z" }, + { url = "https://files.pythonhosted.org/packages/14/14/f59b0127409a33c6ef6f5c1ebd5ad8e32d7861c9c7adfa9a624fc3889f6c/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db", size = 392812, upload-time = "2025-11-30T20:23:09.228Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/66/e0be3e162ac299b3a22527e8913767d869e6cc75c46bd844aa43fb81ab62/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2", size = 517841, upload-time = "2025-11-30T20:23:11.186Z" }, + { url = "https://files.pythonhosted.org/packages/3d/55/fa3b9cf31d0c963ecf1ba777f7cf4b2a2c976795ac430d24a1f43d25a6ba/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa", size = 408149, upload-time = "2025-11-30T20:23:12.864Z" }, + { url = "https://files.pythonhosted.org/packages/60/ca/780cf3b1a32b18c0f05c441958d3758f02544f1d613abf9488cd78876378/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083", size = 383843, upload-time = "2025-11-30T20:23:14.638Z" }, + { url = "https://files.pythonhosted.org/packages/82/86/d5f2e04f2aa6247c613da0c1dd87fcd08fa17107e858193566048a1e2f0a/rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9", size = 396507, upload-time = "2025-11-30T20:23:16.105Z" }, + { url = "https://files.pythonhosted.org/packages/4b/9a/453255d2f769fe44e07ea9785c8347edaf867f7026872e76c1ad9f7bed92/rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0", size = 414949, upload-time = "2025-11-30T20:23:17.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/31/622a86cdc0c45d6df0e9ccb6becdba5074735e7033c20e401a6d9d0e2ca0/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94", size = 565790, upload-time = "2025-11-30T20:23:19.029Z" }, + { url = "https://files.pythonhosted.org/packages/1c/5d/15bbf0fb4a3f58a3b1c67855ec1efcc4ceaef4e86644665fff03e1b66d8d/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08", size = 590217, upload-time = "2025-11-30T20:23:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/6d/61/21b8c41f68e60c8cc3b2e25644f0e3681926020f11d06ab0b78e3c6bbff1/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27", size = 555806, upload-time = "2025-11-30T20:23:22.488Z" }, + { url = "https://files.pythonhosted.org/packages/f9/39/7e067bb06c31de48de3eb200f9fc7c58982a4d3db44b07e73963e10d3be9/rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6", size = 211341, upload-time = "2025-11-30T20:23:24.449Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4d/222ef0b46443cf4cf46764d9c630f3fe4abaa7245be9417e56e9f52b8f65/rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d", size = 225768, upload-time = "2025-11-30T20:23:25.908Z" }, + { url = "https://files.pythonhosted.org/packages/86/81/dad16382ebbd3d0e0328776d8fd7ca94220e4fa0798d1dc5e7da48cb3201/rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = 
"sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0", size = 362099, upload-time = "2025-11-30T20:23:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/2b/60/19f7884db5d5603edf3c6bce35408f45ad3e97e10007df0e17dd57af18f8/rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be", size = 353192, upload-time = "2025-11-30T20:23:29.151Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c4/76eb0e1e72d1a9c4703c69607cec123c29028bff28ce41588792417098ac/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f", size = 384080, upload-time = "2025-11-30T20:23:30.785Z" }, + { url = "https://files.pythonhosted.org/packages/72/87/87ea665e92f3298d1b26d78814721dc39ed8d2c74b86e83348d6b48a6f31/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f", size = 394841, upload-time = "2025-11-30T20:23:32.209Z" }, + { url = "https://files.pythonhosted.org/packages/77/ad/7783a89ca0587c15dcbf139b4a8364a872a25f861bdb88ed99f9b0dec985/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87", size = 516670, upload-time = "2025-11-30T20:23:33.742Z" }, + { url = "https://files.pythonhosted.org/packages/5b/3c/2882bdac942bd2172f3da574eab16f309ae10a3925644e969536553cb4ee/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18", size = 408005, upload-time = "2025-11-30T20:23:35.253Z" 
}, + { url = "https://files.pythonhosted.org/packages/ce/81/9a91c0111ce1758c92516a3e44776920b579d9a7c09b2b06b642d4de3f0f/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad", size = 382112, upload-time = "2025-11-30T20:23:36.842Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8e/1da49d4a107027e5fbc64daeab96a0706361a2918da10cb41769244b805d/rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07", size = 399049, upload-time = "2025-11-30T20:23:38.343Z" }, + { url = "https://files.pythonhosted.org/packages/df/5a/7ee239b1aa48a127570ec03becbb29c9d5a9eb092febbd1699d567cae859/rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f", size = 415661, upload-time = "2025-11-30T20:23:40.263Z" }, + { url = "https://files.pythonhosted.org/packages/70/ea/caa143cf6b772f823bc7929a45da1fa83569ee49b11d18d0ada7f5ee6fd6/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65", size = 565606, upload-time = "2025-11-30T20:23:42.186Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/ac20ba2d69303f961ad8cf55bf7dbdb4763f627291ba3d0d7d67333cced9/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f", size = 591126, upload-time = "2025-11-30T20:23:44.086Z" }, + { url = 
"https://files.pythonhosted.org/packages/21/20/7ff5f3c8b00c8a95f75985128c26ba44503fb35b8e0259d812766ea966c7/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53", size = 553371, upload-time = "2025-11-30T20:23:46.004Z" }, + { url = "https://files.pythonhosted.org/packages/72/c7/81dadd7b27c8ee391c132a6b192111ca58d866577ce2d9b0ca157552cce0/rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed", size = 215298, upload-time = "2025-11-30T20:23:47.696Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d2/1aaac33287e8cfb07aab2e6b8ac1deca62f6f65411344f1433c55e6f3eb8/rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950", size = 228604, upload-time = "2025-11-30T20:23:49.501Z" }, + { url = "https://files.pythonhosted.org/packages/e8/95/ab005315818cc519ad074cb7784dae60d939163108bd2b394e60dc7b5461/rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6", size = 222391, upload-time = "2025-11-30T20:23:50.96Z" }, + { url = "https://files.pythonhosted.org/packages/9e/68/154fe0194d83b973cdedcdcc88947a2752411165930182ae41d983dcefa6/rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb", size = 364868, upload-time = "2025-11-30T20:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/83/69/8bbc8b07ec854d92a8b75668c24d2abcb1719ebf890f5604c61c9369a16f/rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8", size = 353747, upload-time = "2025-11-30T20:23:54.036Z" }, + { url = "https://files.pythonhosted.org/packages/ab/00/ba2e50183dbd9abcce9497fa5149c62b4ff3e22d338a30d690f9af970561/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7", size = 383795, upload-time = "2025-11-30T20:23:55.556Z" }, + { url = "https://files.pythonhosted.org/packages/05/6f/86f0272b84926bcb0e4c972262f54223e8ecc556b3224d281e6598fc9268/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898", size = 393330, upload-time = "2025-11-30T20:23:57.033Z" }, + { url = "https://files.pythonhosted.org/packages/cb/e9/0e02bb2e6dc63d212641da45df2b0bf29699d01715913e0d0f017ee29438/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e", size = 518194, upload-time = "2025-11-30T20:23:58.637Z" }, + { url = "https://files.pythonhosted.org/packages/ee/ca/be7bca14cf21513bdf9c0606aba17d1f389ea2b6987035eb4f62bd923f25/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419", size = 408340, upload-time = "2025-11-30T20:24:00.2Z" }, + { url = "https://files.pythonhosted.org/packages/c2/c7/736e00ebf39ed81d75544c0da6ef7b0998f8201b369acf842f9a90dc8fce/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551", size = 383765, upload-time = 
"2025-11-30T20:24:01.759Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3f/da50dfde9956aaf365c4adc9533b100008ed31aea635f2b8d7b627e25b49/rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8", size = 396834, upload-time = "2025-11-30T20:24:03.687Z" }, + { url = "https://files.pythonhosted.org/packages/4e/00/34bcc2565b6020eab2623349efbdec810676ad571995911f1abdae62a3a0/rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5", size = 415470, upload-time = "2025-11-30T20:24:05.232Z" }, + { url = "https://files.pythonhosted.org/packages/8c/28/882e72b5b3e6f718d5453bd4d0d9cf8df36fddeb4ddbbab17869d5868616/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404", size = 565630, upload-time = "2025-11-30T20:24:06.878Z" }, + { url = "https://files.pythonhosted.org/packages/3b/97/04a65539c17692de5b85c6e293520fd01317fd878ea1995f0367d4532fb1/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856", size = 591148, upload-time = "2025-11-30T20:24:08.445Z" }, + { url = "https://files.pythonhosted.org/packages/85/70/92482ccffb96f5441aab93e26c4d66489eb599efdcf96fad90c14bbfb976/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40", size = 556030, upload-time = "2025-11-30T20:24:10.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/20/53/7c7e784abfa500a2b6b583b147ee4bb5a2b3747a9166bab52fec4b5b5e7d/rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0", size = 211570, upload-time = "2025-11-30T20:24:12.735Z" }, + { url = "https://files.pythonhosted.org/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, + { url = "https://files.pythonhosted.org/packages/69/71/3f34339ee70521864411f8b6992e7ab13ac30d8e4e3309e07c7361767d91/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58", size = 372292, upload-time = "2025-11-30T20:24:16.537Z" }, + { url = "https://files.pythonhosted.org/packages/57/09/f183df9b8f2d66720d2ef71075c59f7e1b336bec7ee4c48f0a2b06857653/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a", size = 362128, upload-time = "2025-11-30T20:24:18.086Z" }, + { url = "https://files.pythonhosted.org/packages/7a/68/5c2594e937253457342e078f0cc1ded3dd7b2ad59afdbf2d354869110a02/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb", size = 391542, upload-time = "2025-11-30T20:24:20.092Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/5c/31ef1afd70b4b4fbdb2800249f34c57c64beb687495b10aec0365f53dfc4/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c", size = 404004, upload-time = "2025-11-30T20:24:22.231Z" }, + { url = "https://files.pythonhosted.org/packages/e3/63/0cfbea38d05756f3440ce6534d51a491d26176ac045e2707adc99bb6e60a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3", size = 527063, upload-time = "2025-11-30T20:24:24.302Z" }, + { url = "https://files.pythonhosted.org/packages/42/e6/01e1f72a2456678b0f618fc9a1a13f882061690893c192fcad9f2926553a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5", size = 413099, upload-time = "2025-11-30T20:24:25.916Z" }, + { url = "https://files.pythonhosted.org/packages/b8/25/8df56677f209003dcbb180765520c544525e3ef21ea72279c98b9aa7c7fb/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738", size = 392177, upload-time = "2025-11-30T20:24:27.834Z" }, + { url = "https://files.pythonhosted.org/packages/4a/b4/0a771378c5f16f8115f796d1f437950158679bcd2a7c68cf251cfb00ed5b/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f", size = 406015, upload-time = "2025-11-30T20:24:29.457Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/d8/456dbba0af75049dc6f63ff295a2f92766b9d521fa00de67a2bd6427d57a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877", size = 423736, upload-time = "2025-11-30T20:24:31.22Z" }, + { url = "https://files.pythonhosted.org/packages/13/64/b4d76f227d5c45a7e0b796c674fd81b0a6c4fbd48dc29271857d8219571c/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a", size = 573981, upload-time = "2025-11-30T20:24:32.934Z" }, + { url = "https://files.pythonhosted.org/packages/20/91/092bacadeda3edf92bf743cc96a7be133e13a39cdbfd7b5082e7ab638406/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4", size = 599782, upload-time = "2025-11-30T20:24:35.169Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b7/b95708304cd49b7b6f82fdd039f1748b66ec2b21d6a45180910802f1abf1/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e", size = 562191, upload-time = "2025-11-30T20:24:36.853Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "ruff" +version = "0.11.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/da/9c6f995903b4d9474b39da91d2d626659af3ff1eeb43e9ae7c119349dba6/ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514", size = 4282054, upload-time = "2025-06-05T21:00:15.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/ce/a11d381192966e0b4290842cc8d4fac7dc9214ddf627c11c1afff87da29b/ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46", size = 10292516, upload-time = "2025-06-05T20:59:32.944Z" }, + { url = "https://files.pythonhosted.org/packages/78/db/87c3b59b0d4e753e40b6a3b4a2642dfd1dcaefbff121ddc64d6c8b47ba00/ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48", size = 11106083, upload-time = "2025-06-05T20:59:37.03Z" }, + { url = "https://files.pythonhosted.org/packages/77/79/d8cec175856ff810a19825d09ce700265f905c643c69f45d2b737e4a470a/ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b", size = 10436024, upload-time = "2025-06-05T20:59:39.741Z" }, + { url = 
"https://files.pythonhosted.org/packages/8b/5b/f6d94f2980fa1ee854b41568368a2e1252681b9238ab2895e133d303538f/ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a", size = 10646324, upload-time = "2025-06-05T20:59:42.185Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9c/b4c2acf24ea4426016d511dfdc787f4ce1ceb835f3c5fbdbcb32b1c63bda/ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc", size = 10174416, upload-time = "2025-06-05T20:59:44.319Z" }, + { url = "https://files.pythonhosted.org/packages/f3/10/e2e62f77c65ede8cd032c2ca39c41f48feabedb6e282bfd6073d81bb671d/ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629", size = 11724197, upload-time = "2025-06-05T20:59:46.935Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f0/466fe8469b85c561e081d798c45f8a1d21e0b4a5ef795a1d7f1a9a9ec182/ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933", size = 12511615, upload-time = "2025-06-05T20:59:49.534Z" }, + { url = "https://files.pythonhosted.org/packages/17/0e/cefe778b46dbd0cbcb03a839946c8f80a06f7968eb298aa4d1a4293f3448/ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165", size = 12117080, upload-time = "2025-06-05T20:59:51.654Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/2c/caaeda564cbe103bed145ea557cb86795b18651b0f6b3ff6a10e84e5a33f/ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71", size = 11326315, upload-time = "2025-06-05T20:59:54.469Z" }, + { url = "https://files.pythonhosted.org/packages/75/f0/782e7d681d660eda8c536962920c41309e6dd4ebcea9a2714ed5127d44bd/ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9", size = 11555640, upload-time = "2025-06-05T20:59:56.986Z" }, + { url = "https://files.pythonhosted.org/packages/5d/d4/3d580c616316c7f07fb3c99dbecfe01fbaea7b6fd9a82b801e72e5de742a/ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc", size = 10507364, upload-time = "2025-06-05T20:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/5a/dc/195e6f17d7b3ea6b12dc4f3e9de575db7983db187c378d44606e5d503319/ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7", size = 10141462, upload-time = "2025-06-05T21:00:01.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/8e/39a094af6967faa57ecdeacb91bedfb232474ff8c3d20f16a5514e6b3534/ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432", size = 11121028, upload-time = "2025-06-05T21:00:04.06Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/c0/b0b508193b0e8a1654ec683ebab18d309861f8bd64e3a2f9648b80d392cb/ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492", size = 11602992, upload-time = "2025-06-05T21:00:06.249Z" }, + { url = "https://files.pythonhosted.org/packages/7c/91/263e33ab93ab09ca06ce4f8f8547a858cc198072f873ebc9be7466790bae/ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250", size = 10474944, upload-time = "2025-06-05T21:00:08.459Z" }, + { url = "https://files.pythonhosted.org/packages/46/f4/7c27734ac2073aae8efb0119cae6931b6fb48017adf048fdf85c19337afc/ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3", size = 11548669, upload-time = "2025-06-05T21:00:11.147Z" }, + { url = "https://files.pythonhosted.org/packages/ec/bf/b273dd11673fed8a6bd46032c0ea2a04b2ac9bfa9c628756a5856ba113b0/ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b", size = 10683928, upload-time = "2025-06-05T21:00:13.758Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sse-starlette" +version = "3.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "starlette" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/34/f5df66cb383efdbf4f2db23cabb27f51b1dcb737efaf8a558f6f1d195134/sse_starlette-3.1.2.tar.gz", hash = "sha256:55eff034207a83a0eb86de9a68099bd0157838f0b8b999a1b742005c71e33618", size = 26303, upload-time = "2025-12-31T08:02:20.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/95/8c4b76eec9ae574474e5d2997557cebf764bcd3586458956c30631ae08f4/sse_starlette-3.1.2-py3-none-any.whl", hash = "sha256:cd800dd349f4521b317b9391d3796fa97b71748a4da9b9e00aafab32dda375c8", size = 12484, upload-time = "2025-12-31T08:02:18.894Z" }, +] + +[[package]] +name = "starlette" +version = "0.50.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = 
"sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, +] + +[[package]] +name = "types-authlib" +version = "1.6.6.20251220" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/c5/ed668b28a66b847903cc94362bf05171a01473e3782e60b362f246c160fe/types_authlib-1.6.6.20251220.tar.gz", hash = "sha256:a2369f23732fe88d5087ed720864f40d0319c19e8411d85a4930e31018996f90", size = 45595, upload-time = "2025-12-20T03:07:43.241Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/ef/e783f3d0d489f7bf66557c3f26cd620e4bae1cba43a59e69c4ced25853bc/types_authlib-1.6.6.20251220-py3-none-any.whl", hash = "sha256:dd1d545fe4c498686c0285d59dc950d87d977d294fc430617c91e0a11f6f4f2b", size = 102884, upload-time = "2025-12-20T03:07:42.028Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20251115" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250915" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/69/3c51b36d04da19b92f9e815be12753125bd8bc247ba0470a982e6979e71c/types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3", size = 17522, upload-time = "2025-09-15T03:01:00.728Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338, upload-time = "2025-09-15T03:00:59.218Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = 
"sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" }, +] + +[[package]] +name = "websockets" +version = "16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/20/74/221f58decd852f4b59cc3354cccaf87e8ef695fede361d03dc9a7396573b/websockets-16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04cdd5d2d1dacbad0a7bf36ccbcd3ccd5a30ee188f2560b7a62a30d14107b31a", size = 177343, upload-time = "2026-01-10T09:22:21.28Z" }, + { url = "https://files.pythonhosted.org/packages/19/0f/22ef6107ee52ab7f0b710d55d36f5a5d3ef19e8a205541a6d7ffa7994e5a/websockets-16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ff32bb86522a9e5e31439a58addbb0166f0204d64066fb955265c4e214160f0", size = 175021, upload-time = "2026-01-10T09:22:22.696Z" }, + { url = "https://files.pythonhosted.org/packages/10/40/904a4cb30d9b61c0e278899bf36342e9b0208eb3c470324a9ecbaac2a30f/websockets-16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:583b7c42688636f930688d712885cf1531326ee05effd982028212ccc13e5957", size = 175320, upload-time = "2026-01-10T09:22:23.94Z" }, + { url = "https://files.pythonhosted.org/packages/9d/2f/4b3ca7e106bc608744b1cdae041e005e446124bebb037b18799c2d356864/websockets-16.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7d837379b647c0c4c2355c2499723f82f1635fd2c26510e1f587d89bc2199e72", size = 183815, upload-time = "2026-01-10T09:22:25.469Z" }, + { url = "https://files.pythonhosted.org/packages/86/26/d40eaa2a46d4302becec8d15b0fc5e45bdde05191e7628405a19cf491ccd/websockets-16.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df57afc692e517a85e65b72e165356ed1df12386ecb879ad5693be08fac65dde", size = 185054, upload-time = "2026-01-10T09:22:27.101Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/ba/6500a0efc94f7373ee8fefa8c271acdfd4dca8bd49a90d4be7ccabfc397e/websockets-16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2b9f1e0d69bc60a4a87349d50c09a037a2607918746f07de04df9e43252c77a3", size = 184565, upload-time = "2026-01-10T09:22:28.293Z" }, + { url = "https://files.pythonhosted.org/packages/04/b4/96bf2cee7c8d8102389374a2616200574f5f01128d1082f44102140344cc/websockets-16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:335c23addf3d5e6a8633f9f8eda77efad001671e80b95c491dd0924587ece0b3", size = 183848, upload-time = "2026-01-10T09:22:30.394Z" }, + { url = "https://files.pythonhosted.org/packages/02/8e/81f40fb00fd125357814e8c3025738fc4ffc3da4b6b4a4472a82ba304b41/websockets-16.0-cp310-cp310-win32.whl", hash = "sha256:37b31c1623c6605e4c00d466c9d633f9b812ea430c11c8a278774a1fde1acfa9", size = 178249, upload-time = "2026-01-10T09:22:32.083Z" }, + { url = "https://files.pythonhosted.org/packages/b4/5f/7e40efe8df57db9b91c88a43690ac66f7b7aa73a11aa6a66b927e44f26fa/websockets-16.0-cp310-cp310-win_amd64.whl", hash = "sha256:8e1dab317b6e77424356e11e99a432b7cb2f3ec8c5ab4dabbcee6add48f72b35", size = 178685, upload-time = "2026-01-10T09:22:33.345Z" }, + { url = "https://files.pythonhosted.org/packages/f2/db/de907251b4ff46ae804ad0409809504153b3f30984daf82a1d84a9875830/websockets-16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8", size = 177340, upload-time = "2026-01-10T09:22:34.539Z" }, + { url = "https://files.pythonhosted.org/packages/f3/fa/abe89019d8d8815c8781e90d697dec52523fb8ebe308bf11664e8de1877e/websockets-16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad", size = 175022, upload-time = "2026-01-10T09:22:36.332Z" }, + { url = "https://files.pythonhosted.org/packages/58/5d/88ea17ed1ded2079358b40d31d48abe90a73c9e5819dbcde1606e991e2ad/websockets-16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d", size = 175319, upload-time = "2026-01-10T09:22:37.602Z" }, + { url = "https://files.pythonhosted.org/packages/d2/ae/0ee92b33087a33632f37a635e11e1d99d429d3d323329675a6022312aac2/websockets-16.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe", size = 184631, upload-time = "2026-01-10T09:22:38.789Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c5/27178df583b6c5b31b29f526ba2da5e2f864ecc79c99dae630a85d68c304/websockets-16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b", size = 185870, upload-time = "2026-01-10T09:22:39.893Z" }, + { url = "https://files.pythonhosted.org/packages/87/05/536652aa84ddc1c018dbb7e2c4cbcd0db884580bf8e95aece7593fde526f/websockets-16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5", size = 185361, upload-time = "2026-01-10T09:22:41.016Z" }, + { url = "https://files.pythonhosted.org/packages/6d/e2/d5332c90da12b1e01f06fb1b85c50cfc489783076547415bf9f0a659ec19/websockets-16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64", size = 184615, upload-time = 
"2026-01-10T09:22:42.442Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/d3f9576691cae9253b51555f841bc6600bf0a983a461c79500ace5a5b364/websockets-16.0-cp311-cp311-win32.whl", hash = "sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6", size = 178246, upload-time = "2026-01-10T09:22:43.654Z" }, + { url = "https://files.pythonhosted.org/packages/54/67/eaff76b3dbaf18dcddabc3b8c1dba50b483761cccff67793897945b37408/websockets-16.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac", size = 178684, upload-time = "2026-01-10T09:22:44.941Z" }, + { url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" }, + { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" }, + { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" }, + { url = 
"https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" }, + { url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" }, + { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" }, + { url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" }, + { url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" }, + { url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" }, + { url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" }, + { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" }, + { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" }, + { url = "https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" }, + { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" }, + { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" }, + { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" }, + { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" }, + { url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" }, + { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" }, + { url = 
"https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" }, + { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" }, + { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = 
"sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" }, + { url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" }, + { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" }, + { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" }, + { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = 
"2026-01-10T09:23:32.627Z" }, + { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" }, + { url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" }, + { url = "https://files.pythonhosted.org/packages/72/07/c98a68571dcf256e74f1f816b8cc5eae6eb2d3d5cfa44d37f801619d9166/websockets-16.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d", size = 174947, upload-time = "2026-01-10T09:23:36.166Z" }, + { url = "https://files.pythonhosted.org/packages/7e/52/93e166a81e0305b33fe416338be92ae863563fe7bce446b0f687b9df5aea/websockets-16.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03", size = 175260, upload-time = "2026-01-10T09:23:37.409Z" }, + { url = "https://files.pythonhosted.org/packages/56/0c/2dbf513bafd24889d33de2ff0368190a0e69f37bcfa19009ef819fe4d507/websockets-16.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da", size = 176071, upload-time = "2026-01-10T09:23:39.158Z" }, + { url = 
"https://files.pythonhosted.org/packages/a5/8f/aea9c71cc92bf9b6cc0f7f70df8f0b420636b6c96ef4feee1e16f80f75dd/websockets-16.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c", size = 176968, upload-time = "2026-01-10T09:23:41.031Z" }, + { url = "https://files.pythonhosted.org/packages/9a/3f/f70e03f40ffc9a30d817eef7da1be72ee4956ba8d7255c399a01b135902a/websockets-16.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767", size = 178735, upload-time = "2026-01-10T09:23:42.259Z" }, + { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +]